/*	$NetBSD: uvm_pdaemon.c,v 1.93.4.2.4.8 2012/02/29 18:03:40 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.93.4.2.4.8 2012/02/29 18:03:40 matt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define	UVMPD_NUMDIRTYREACTS	16

#define	UVMPD_NUMTRYLOCKOWNER	16

/*
 * local prototypes
 */

static void	uvmpd_scan(struct uvm_pggroup *);
static void	uvmpd_scan_queue(struct uvm_pggroup *);
static void	uvmpd_tune(void);

static void	uvmpd_checkgroup(const struct uvm_pggroup *);

static struct uvm_pdinfo {
	unsigned int pd_waiters;
	unsigned int pd_scans_neededs;
	struct uvm_pggrouplist pd_pagingq;
	struct uvm_pggrouplist pd_pendingq;
} uvm_pdinfo = {
	.pd_pagingq = TAILQ_HEAD_INITIALIZER(uvm_pdinfo.pd_pagingq),
	.pd_pendingq = TAILQ_HEAD_INITIALIZER(uvm_pdinfo.pd_pendingq),
};

/*
 * XXX hack to avoid hangs when large processes fork.
 */
u_int uvm_extrapages;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	int timo = 0;

	mutex_spin_enter(&uvm_fpageqlock);

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curlwp == uvm.pagedaemon_lwp && uvmexp.paging == 0) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	uvm_pdinfo.pd_waiters++;
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm_fpageqlock, false, wmsg, timo);
	uvm_pdinfo.pd_waiters--;
}
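
/*
 * A minimal sketch of the usual pattern around uvm_wait() (hypothetical
 * caller, shown for illustration only): drop all locks, then retry the
 * allocation until the pagedaemon has freed some memory.
 *
 *	struct vm_page *pg;
 *
 *	while ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL)
 *		uvm_wait("examplewait");
 */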

static void
uvmpd_checkgroup(const struct uvm_pggroup *grp)
{
#ifdef DEBUG
	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;
	bool in_pendingq = false;
	bool in_pagingq = false;
	const struct uvm_pggroup *tstgrp;

	TAILQ_FOREACH(tstgrp, &pdinfo->pd_pendingq, pgrp_pending_link) {
		if (tstgrp == grp) {
			in_pendingq = true;
			break;
		}
	}

	TAILQ_FOREACH(tstgrp, &pdinfo->pd_pagingq, pgrp_paging_link) {
		if (tstgrp == grp) {
			in_pagingq = true;
			break;
		}
	}

	if (grp->pgrp_paging > 0) {
		KASSERT(in_pagingq);
		KASSERT(!in_pendingq);
	} else {
		KASSERT(!in_pagingq);
		KASSERT(in_pendingq == grp->pgrp_scan_needed);
	}
#endif
}

/*
 * uvm_kick_pdaemon: perform checks to determine if we need to
 * give the pagedaemon a nudge, and do so if necessary.
 *
 * => called with uvm_fpageqlock held.
 */

void
uvm_kick_pdaemon(void)
{
	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;
	bool need_wakeup = false;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);

	KASSERT(mutex_owned(&uvm_fpageqlock));

	struct uvm_pggroup *grp;
	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		const bool prev_scan_needed = grp->pgrp_scan_needed;

		KASSERT(grp->pgrp_npages > 0);
		uvmpd_checkgroup(grp);

		grp->pgrp_scan_needed =
		    grp->pgrp_free + grp->pgrp_paging < grp->pgrp_freemin
		    || (grp->pgrp_free + grp->pgrp_paging < grp->pgrp_freetarg
			&& uvmpdpol_needsscan_p(grp));

		if (prev_scan_needed != grp->pgrp_scan_needed) {
			UVMHIST_LOG(pdhist, " [%zd] %d->%d (scan=%d)",
			    grp - uvm.pggroups, prev_scan_needed,
			    grp->pgrp_scan_needed, uvmpdpol_needsscan_p(grp));
			UVMHIST_LOG(pdhist, " [%zd] %d < min(%d,%d)",
			    grp - uvm.pggroups,
			    grp->pgrp_free + grp->pgrp_paging,
			    grp->pgrp_freemin, grp->pgrp_freetarg);

			if (grp->pgrp_scan_needed) {
				TAILQ_INSERT_TAIL(&pdinfo->pd_pendingq,
				    grp, pgrp_pending_link);
				need_wakeup = true;
			} else {
				TAILQ_REMOVE(&pdinfo->pd_pendingq,
				    grp, pgrp_pending_link);
			}
			uvmpd_checkgroup(grp);
		}
	}

	if (need_wakeup)
		wakeup(&uvm.pagedaemon);

	UVMHIST_LOG(pdhist, " <- done: wakeup=%d!",
	    need_wakeup, 0, 0, 0);
}
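
/*
 * A minimal usage sketch for uvm_kick_pdaemon() (hypothetical caller):
 * it must be invoked with uvm_fpageqlock held, typically from an
 * allocation path that notices free pages are getting scarce.
 *
 *	mutex_spin_enter(&uvm_fpageqlock);
 *	uvm_kick_pdaemon();
 *	mutex_spin_exit(&uvm_fpageqlock);
 */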

/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune(void)
{
	u_int extrapages = atomic_swap_uint(&uvm_extrapages, 0) / uvmexp.ncolors;
	u_int freemin = 0;
	u_int freetarg = 0;
	u_int wiredmax = 0;

	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	extrapages = roundup(extrapages, uvmexp.npggroups);

	struct uvm_pggroup *grp;
	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		KASSERT(grp->pgrp_npages > 0);

		/*
		 * try to keep 0.5% of available RAM free, but limit
		 * to between 128k and 1024k per-CPU.
		 * XXX: what are these values good for?
		 */
		u_int val = grp->pgrp_npages / 200;
		val = MAX(val, (128*1024) >> PAGE_SHIFT);
		val = MIN(val, (1024*1024) >> PAGE_SHIFT);
		val *= ncpu;

		/* Make sure there's always a user page free. */
		if (val * uvmexp.npggroups <= uvmexp.reserve_kernel)
			val = uvmexp.reserve_kernel / uvmexp.npggroups + 1;

		grp->pgrp_freemin = val;

		/* Calculate freetarg. */
		val = (grp->pgrp_freemin * 4) / 3;
		if (val <= grp->pgrp_freemin)
			val = grp->pgrp_freemin + 1;
#ifdef VM_FREELIST_NORMALOK_P
		if (!VM_FREELIST_NORMALOK_P(grp->pgrp_free_list))
			val *= 4;
#endif
		grp->pgrp_freetarg = val + extrapages / uvmexp.npggroups;
		if (grp->pgrp_freetarg > grp->pgrp_npages / 2)
			grp->pgrp_freetarg = grp->pgrp_npages / 2;

		grp->pgrp_wiredmax = grp->pgrp_npages / 3;
		UVMHIST_LOG(pdhist,
		    "[%zd]: freemin=%d, freetarg=%d, wiredmax=%d",
		    grp - uvm.pggroups, grp->pgrp_freemin, grp->pgrp_freetarg,
		    grp->pgrp_wiredmax);

		freemin += grp->pgrp_freemin;
		freetarg += grp->pgrp_freetarg;
		wiredmax += grp->pgrp_wiredmax;
	}

	uvmexp.freemin = freemin;
	uvmexp.freetarg = freetarg;
	uvmexp.wiredmax = wiredmax;

	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	    uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}
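
/*
 * A worked example of the tuning math above (illustrative numbers only,
 * assuming PAGE_SHIFT of 12 and ncpu of 2): for a group of 262144 pages
 * (1 GiB), npages / 200 is 1310 pages, which the 1024k per-CPU cap
 * clamps to 256 pages; times ncpu gives pgrp_freemin = 512 pages
 * (2 MiB), and pgrp_freetarg = (512 * 4) / 3 = 682 pages before any
 * extrapages are distributed.
 */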

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	u_int npages = 0;
	u_int extrapages = 0;
	u_int npggroups = 0;
	struct pool *pp;
	uint64_t where;
	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;
	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_lwp = curlwp;
	mutex_enter(&uvm_pageqlock);
	npages = uvmexp.npages;
	uvmpd_tune();
	mutex_exit(&uvm_pageqlock);

	/*
	 * main loop
	 */

	for (;;) {
		struct uvm_pggroup *grp;
		bool need_free = false;
		u_int bufcnt = 0;

		mutex_spin_enter(&uvm_fpageqlock);
		/*
		 * If no one is waiting for pages and no page group
		 * needs a scan, then sleep.
		 */
		if (pdinfo->pd_waiters == 0
		    && TAILQ_FIRST(&pdinfo->pd_pendingq) == NULL) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
			    &uvm_fpageqlock, false, "pgdaemon", 0);
			uvmexp.pdwoke++;
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);
		} else if (TAILQ_FIRST(&pdinfo->pd_pendingq) == NULL) {
			/*
			 * Someone is waiting but no groups are pending.
			 * Kick ourselves to find groups that need work.
			 */
			uvm_kick_pdaemon();
			mutex_spin_exit(&uvm_fpageqlock);
		} else {
			mutex_spin_exit(&uvm_fpageqlock);
		}

		/*
		 * now lock page queues and recompute inactive count
		 */

		mutex_enter(&uvm_pageqlock);
		mutex_spin_enter(&uvm_fpageqlock);

		if (npages != uvmexp.npages
		    || extrapages != uvm_extrapages
		    || npggroups != uvmexp.npggroups) {
			npages = uvmexp.npages;
			extrapages = uvm_extrapages;
			npggroups = uvmexp.npggroups;
			uvmpd_tune();
		}

		/*
		 * Estimate a hint.  Note that buffer memory is returned
		 * to the system only when an entire pool page is empty.
		 */
		bool need_wakeup = false;
		while ((grp = TAILQ_FIRST(&pdinfo->pd_pendingq)) != NULL) {
			KASSERT(grp->pgrp_npages > 0);

			uvmpdpol_tune(grp);

			/*
			 * While we are locked, remove this from the pendingq.
			 */
			uvmpd_checkgroup(grp);
			KASSERT(grp->pgrp_scan_needed);
			TAILQ_REMOVE(&pdinfo->pd_pendingq, grp,
			    pgrp_pending_link);
			grp->pgrp_scan_needed = false;
			uvmpd_checkgroup(grp);

			int diff = grp->pgrp_freetarg - grp->pgrp_free;
			if (diff < 0)
				diff = 0;

			bufcnt += diff;

			UVMHIST_LOG(pdhist," [%zu]: "
			    "free/ftarg/fmin=%u/%u/%u",
			    grp - uvm.pggroups, grp->pgrp_free,
			    grp->pgrp_freetarg, grp->pgrp_freemin);

			if (grp->pgrp_paging < diff)
				need_free = true;

			/*
			 * scan if needed
			 */
			if (grp->pgrp_paging < diff
			    || uvmpdpol_needsscan_p(grp)) {
				mutex_spin_exit(&uvm_fpageqlock);
				uvmpd_scan(grp);
				mutex_spin_enter(&uvm_fpageqlock);
			} else {
				UVMHIST_LOG(pdhist,
				    " [%zu]: diff/paging=%u/%u: "
				    "scan skipped",
				    grp - uvm.pggroups, diff,
				    grp->pgrp_paging, 0);
			}

			/*
			 * if there's any free memory to be had,
			 * wake up any waiters.
			 */
			if (grp->pgrp_free * uvmexp.npggroups > uvmexp.reserve_kernel
			    || grp->pgrp_paging == 0) {
				need_wakeup = true;
			}
		}
		if (need_wakeup) {
			wakeup(&uvmexp.free);
		}
		KASSERT(!need_free || need_wakeup);
		mutex_spin_exit(&uvm_fpageqlock);

		/*
		 * scan done.  unlock page queues (the only lock
		 * we are holding)
		 */
		mutex_exit(&uvm_pageqlock);

		/*
		 * if we don't need free memory, we're done.
		 */

		if (!need_free)
			continue;

		/*
		 * start draining pool resources now that we're not
		 * holding any locks.
		 */
		pool_drain_start(&pp, &where);

		/*
		 * kill unused metadata buffers.
		 */
		if (bufcnt > 0) {
			mutex_enter(&bufcache_lock);
			buf_drain(bufcnt << PAGE_SHIFT);
			mutex_exit(&bufcache_lock);
		}

		/*
		 * complete draining the pools.
		 */
		pool_drain_end(pp, where);
	}
	/*NOTREACHED*/
}

/*
 * uvm_aiodone_worker: a workqueue callback for the aiodone daemon.
 */

void
uvm_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (void *)wk;

	KASSERT(&bp->b_work == wk);

	/*
	 * process an i/o that's done.
	 */

	(*bp->b_iodone)(bp);
}

void
uvm_pageout_start(struct uvm_pggroup *grp, u_int npages)
{
	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;

	mutex_spin_enter(&uvm_fpageqlock);

	uvmpd_checkgroup(grp);
	uvmexp.paging += npages;
	if (grp->pgrp_paging == 0) {
		TAILQ_INSERT_TAIL(&pdinfo->pd_pagingq, grp, pgrp_paging_link);
	}
	grp->pgrp_paging += npages;
	uvmpd_checkgroup(grp);
	mutex_spin_exit(&uvm_fpageqlock);
}

void
uvm_pageout_done(struct vm_page *pg, bool freed)
{
	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;

	KASSERT(pg->flags & PG_PAGEOUT);

	mutex_spin_enter(&uvm_fpageqlock);
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);

	KASSERT(grp->pgrp_paging > 0);
	uvmpd_checkgroup(grp);
	if (--grp->pgrp_paging == 0) {
		TAILQ_REMOVE(&pdinfo->pd_pagingq, grp, pgrp_paging_link);
		uvmpd_checkgroup(grp);
	}
	KASSERT(uvmexp.paging > 0);
	uvmexp.paging--;
	grp->pgrp_pdfreed += freed;

	/*
	 * Page is no longer being paged out.
	 */
	pg->flags &= ~PG_PAGEOUT;

	/*
	 * wake up either the pagedaemon or the LWPs waiting for it.
	 */
	if (grp->pgrp_free * uvmexp.npggroups <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else {
		wakeup(&uvmexp.free);
	}

	mutex_spin_exit(&uvm_fpageqlock);
}
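
/*
 * A minimal sketch of how the two functions above pair up (hypothetical
 * pager code; swapcluster_flush() below is a real caller of the first):
 * uvm_pageout_start() is called once with the number of pages being
 * written out, and uvm_pageout_done() once per page when the i/o
 * completes.
 *
 *	uvm_pageout_start(grp, npages);
 *	error = uvm_swap_put(slot, pages, npages, 0);
 *	...
 *	uvm_pageout_done(pg, freed);	(per page, at i/o completion)
 */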

/*
 * uvmpd_trylockowner: trylock the page's owner.
 *
 * => called with pageq locked.
 * => resolve orphaned O->A loaned page.
 * => return the locked mutex on success.  otherwise, return NULL.
 */

kmutex_t *
uvmpd_trylockowner(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;
	kmutex_t *slock;

	KASSERT(mutex_owned(&uvm_pageqlock));

	if (uobj != NULL) {
		slock = &uobj->vmobjlock;
	} else {
		struct vm_anon *anon = pg->uanon;

		KASSERT(anon != NULL);
		slock = &anon->an_lock;
	}

	if (!mutex_tryenter(slock)) {
		return NULL;
	}

	if (uobj == NULL) {

		/*
		 * set PQ_ANON if it isn't set already.
		 */

		if ((pg->pqflags & PQ_ANON) == 0) {
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->pqflags |= PQ_ANON;
			/* anon now owns it */
		}
	}

	return slock;
}
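
/*
 * A minimal usage sketch for uvmpd_trylockowner() (illustrative;
 * uvmpd_trydropswap() below is a real caller): the returned mutex,
 * if any, is the one the caller must later release.
 *
 *	kmutex_t *slock = uvmpd_trylockowner(pg);
 *	if (slock != NULL) {
 *		... pg's owner is now locked ...
 *		mutex_exit(slock);
 *	}
 */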

#if defined(VMSWAP)
struct swapcluster {
	int swc_slot;
	int swc_nallocated;
	int swc_nused;
	struct vm_page *swc_pages[howmany(MAXPHYS, MIN_PAGE_SIZE)];
};

static void
swapcluster_init(struct swapcluster *swc)
{

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

static int
swapcluster_allocslots(struct swapcluster *swc)
{
	int slot;
	int npages;

	if (swc->swc_slot != 0) {
		return 0;
	}

	/* Even with strange MAXPHYS, the shift
	   implicitly rounds down to a page. */
	npages = MAXPHYS >> PAGE_SHIFT;
	slot = uvm_swap_alloc(&npages, true);
	if (slot == 0) {
		return ENOMEM;
	}
	swc->swc_slot = slot;
	swc->swc_nallocated = npages;
	swc->swc_nused = 0;

	return 0;
}

static int
swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
{
	int slot;
	struct uvm_object *uobj;

	KASSERT(swc->swc_slot != 0);
	KASSERT(swc->swc_nused < swc->swc_nallocated);
	KASSERT((pg->pqflags & PQ_SWAPBACKED) != 0);

	slot = swc->swc_slot + swc->swc_nused;
	uobj = pg->uobject;
	if (uobj == NULL) {
		KASSERT(mutex_owned(&pg->uanon->an_lock));
		pg->uanon->an_swslot = slot;
	} else {
		int result;

		KASSERT(mutex_owned(&uobj->vmobjlock));
		result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
		if (result == -1) {
			return ENOMEM;
		}
	}
	swc->swc_pages[swc->swc_nused] = pg;
	swc->swc_nused++;

	return 0;
}

static void
swapcluster_flush(struct uvm_pggroup *grp, struct swapcluster *swc, bool now)
{
	int slot;
	u_int nused;
	int nallocated;
	int error;

	if (swc->swc_slot == 0) {
		return;
	}
	KASSERT(swc->swc_nused <= swc->swc_nallocated);

	slot = swc->swc_slot;
	nused = swc->swc_nused;
	nallocated = swc->swc_nallocated;

	/*
	 * if this is the final pageout we could have a few
	 * unused swap blocks.  if so, free them now.
	 */

	if (nused < nallocated) {
		if (!now) {
			return;
		}
		uvm_swap_free(slot + nused, nallocated - nused);
	}

	/*
	 * now start the pageout.
	 */

	if (nused > 0) {
		grp->pgrp_pdpageouts++;
		uvmexp.pdpageouts++;	/* procfs */
		uvm_pageout_start(grp, nused);
		error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
		KASSERT(error == 0 || error == ENOMEM);
	}

	/*
	 * zero swslot to indicate that we are
	 * no longer building a swap-backed cluster.
	 */

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

static int
swapcluster_nused(struct swapcluster *swc)
{

	return swc->swc_nused;
}
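
/*
 * A minimal sketch of the swapcluster lifecycle (illustrative only;
 * uvmpd_scan_queue() below is the real user):
 *
 *	struct swapcluster swc;
 *
 *	swapcluster_init(&swc);
 *	for each dirty swap-backed victim page pg {
 *		if (swapcluster_allocslots(&swc) != 0)
 *			break;			(swap is full)
 *		if (swapcluster_add(&swc, pg) != 0)
 *			continue;		(no slot; try another page)
 *		if (swc.swc_nused == swc.swc_nallocated)
 *			swapcluster_flush(grp, &swc, false);
 *	}
 *	swapcluster_flush(grp, &swc, true);	(final flush; frees unused slots)
 */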

/*
 * uvmpd_dropswap: free any swap allocated to this page.
 *
 * => called with owner locked.
 * => return true if a page had an associated slot.
 */

static bool
uvmpd_dropswap(struct vm_page *pg)
{
	bool result = false;
	struct vm_anon *anon = pg->uanon;

	if ((pg->pqflags & PQ_ANON) && anon->an_swslot) {
		uvm_swap_free(anon->an_swslot, 1);
		anon->an_swslot = 0;
		pg->flags &= ~PG_CLEAN;
		result = true;
	} else if (pg->pqflags & PQ_AOBJ) {
		int slot = uao_set_swslot(pg->uobject,
		    pg->offset >> PAGE_SHIFT, 0);
		if (slot) {
			uvm_swap_free(slot, 1);
			pg->flags &= ~PG_CLEAN;
			result = true;
		}
	}

	return result;
}

/*
 * uvmpd_trydropswap: try to free any swap allocated to this page.
 *
 * => return true if a slot is successfully freed.
 */

bool
uvmpd_trydropswap(struct vm_page *pg)
{
	kmutex_t *slock;
	bool result;

	if ((pg->flags & PG_BUSY) != 0) {
		return false;
	}

	/*
	 * lock the page's owner.
	 */

	slock = uvmpd_trylockowner(pg);
	if (slock == NULL) {
		return false;
	}

	/*
	 * skip this page if it's busy.
	 */

	if ((pg->flags & PG_BUSY) != 0) {
		mutex_exit(slock);
		return false;
	}

	result = uvmpd_dropswap(pg);

	mutex_exit(slock);

	return result;
}

#endif /* defined(VMSWAP) */

/*
 * uvmpd_scan_queue: scan a replacement-candidate list for pages
 * to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 */

static void
uvmpd_scan_queue(struct uvm_pggroup *grp)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	struct vm_anon *anon;
#if defined(VMSWAP)
	struct swapcluster swc;
#endif /* defined(VMSWAP) */
	u_int dirtyreacts;
	u_int lockownerfail;
	u_int victims;
	u_int freed;
	u_int busy;
	kmutex_t *slock;
	UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);

	/*
	 * swc.swc_slot is non-zero while we are building a swap cluster.
	 * we want to stay in the loop while we have a page to scan or we
	 * have a swap-cluster to build.
	 */

#if defined(VMSWAP)
	swapcluster_init(&swc);
#endif /* defined(VMSWAP) */

	dirtyreacts = 0;
	lockownerfail = 0;
	victims = 0;
	freed = 0;
	busy = 0;
	uvmpdpol_scaninit(grp);

	UVMHIST_LOG(pdhist,"  [%zd]: want free target (%u)",
	    grp - uvm.pggroups, grp->pgrp_freetarg << 2, 0, 0);
	while (/* CONSTCOND */ 1) {

		/*
		 * see if we've met the free target.
		 */

		if (grp->pgrp_free + grp->pgrp_paging
#if defined(VMSWAP)
		    + swapcluster_nused(&swc)
#endif /* defined(VMSWAP) */
		    >= grp->pgrp_freetarg << 2 ||
		    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
			UVMHIST_LOG(pdhist,"  [%zd]: met free target (%u + %u)"
			    ", dirty reacts %u",
			    grp - uvm.pggroups, grp->pgrp_free,
			    grp->pgrp_paging, dirtyreacts);
			break;
		}

		pg = uvmpdpol_selectvictim(grp);
		if (pg == NULL) {
			UVMHIST_LOG(pdhist,"  [%zd]: selectvictim didn't",
			    grp - uvm.pggroups, 0, 0, 0);
			break;
		}
		victims++;
		KASSERT(uvmpdpol_pageisqueued_p(pg));
		KASSERT(pg->wire_count == 0);

		/*
		 * we are below target and have a new page to consider.
		 */

		anon = pg->uanon;
		uobj = pg->uobject;

		/*
		 * first we attempt to lock the object that this page
		 * belongs to.  if our attempt fails we skip on to
		 * the next page (no harm done).  it is important to
		 * "try" locking the object as we are locking in the
		 * wrong order (pageq -> object) and we don't want to
		 * deadlock.
		 *
		 * the only time we expect to see an ownerless page
		 * (i.e. a page with no uobject and !PQ_ANON) is if an
		 * anon has loaned a page from a uvm_object and the
		 * uvm_object has dropped the ownership.  in that
		 * case, the anon can "take over" the loaned page
		 * and make it its own.
		 */

		slock = uvmpd_trylockowner(pg);
		if (slock == NULL) {
			/*
			 * yield the cpu to give an LWP holding the lock
			 * a chance to run.  otherwise we can busy-loop
			 * for too long if the page queue is filled with
			 * a lot of pages from few objects.
			 */
    942          1.89        ad 			lockownerfail++;
    943          1.89        ad 			if (lockownerfail > UVMPD_NUMTRYLOCKOWNER) {
    944          1.89        ad 				mutex_exit(&uvm_pageqlock);
    945          1.89        ad 				/* XXX Better than yielding but inadequate. */
    946          1.89        ad 				kpause("livelock", false, 1, NULL);
    947          1.89        ad 				mutex_enter(&uvm_pageqlock);
    948          1.89        ad 				lockownerfail = 0;
    949          1.89        ad 			}
    950          1.76      yamt 			continue;
    951          1.76      yamt 		}
    952  1.93.4.2.4.3      matt 		if (pg->flags & PG_BUSY) {
    953          1.89        ad 			mutex_exit(slock);
    954  1.93.4.2.4.6      matt 			busy++;
    955          1.76      yamt 			continue;
    956          1.76      yamt 		}
    957          1.76      yamt 
    958          1.73      yamt 		/* does the page belong to an object? */
    959          1.73      yamt 		if (uobj != NULL) {
    960  1.93.4.2.4.3      matt 			grp->pgrp_pdobscan++;
    961          1.73      yamt 		} else {
    962          1.73      yamt #if defined(VMSWAP)
    963          1.73      yamt 			KASSERT(anon != NULL);
    964  1.93.4.2.4.3      matt 			grp->pgrp_pdanscan++;
    965          1.68      yamt #else /* defined(VMSWAP) */
    966          1.73      yamt 			panic("%s: anon", __func__);
    967          1.68      yamt #endif /* defined(VMSWAP) */
    968          1.73      yamt 		}
    969           1.8       mrg 
    970          1.37       chs 
    971          1.73      yamt 		/*
    972          1.73      yamt 		 * we now have the object and the page queues locked.
    973          1.73      yamt 		 * if the page is not swap-backed, call the object's
    974          1.73      yamt 		 * pager to flush and free the page.
    975          1.73      yamt 		 */
    976          1.37       chs 
    977          1.69      yamt #if defined(READAHEAD_STATS)
    978  1.93.4.2.4.3      matt 		if ((pg->pqflags & PQ_READAHEAD) != 0) {
    979  1.93.4.2.4.3      matt 			pg->pqflags &= ~PQ_READAHEAD;
    980          1.73      yamt 			uvm_ra_miss.ev_count++;
    981          1.73      yamt 		}
    982          1.69      yamt #endif /* defined(READAHEAD_STATS) */
    983          1.69      yamt 
    984  1.93.4.2.4.3      matt 		if ((pg->pqflags & PQ_SWAPBACKED) == 0) {
    985          1.82       alc 			KASSERT(uobj != NULL);
    986          1.89        ad 			mutex_exit(&uvm_pageqlock);
    987  1.93.4.2.4.3      matt 			(void) (uobj->pgops->pgo_put)(uobj, pg->offset,
    988  1.93.4.2.4.3      matt 			    pg->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
    989  1.93.4.2.4.6      matt 			grp->pgrp_pdputs++;
    990          1.89        ad 			mutex_enter(&uvm_pageqlock);
    991          1.73      yamt 			continue;
    992          1.73      yamt 		}
    993          1.37       chs 
    994          1.73      yamt 		/*
    995          1.73      yamt 		 * the page is swap-backed.  remove all the permissions
    996          1.73      yamt 		 * from the page so we can sync the modified info
    997          1.73      yamt 		 * without any race conditions.  if the page is clean
    998          1.73      yamt 		 * we can free it now and continue.
    999          1.73      yamt 		 */
   1000           1.8       mrg 
   1001  1.93.4.2.4.3      matt 		pmap_page_protect(pg, VM_PROT_NONE);
   1002  1.93.4.2.4.3      matt 		if ((pg->flags & PG_CLEAN) && pmap_clear_modify(pg)) {
   1003  1.93.4.2.4.3      matt 			pg->flags &= ~(PG_CLEAN);
   1004          1.73      yamt 		}
   1005  1.93.4.2.4.3      matt 		if (pg->flags & PG_CLEAN) {
   1006          1.73      yamt 			int slot;
   1007          1.73      yamt 			int pageidx;
   1008          1.73      yamt 
   1009  1.93.4.2.4.3      matt 			pageidx = pg->offset >> PAGE_SHIFT;
   1010  1.93.4.2.4.3      matt 			KASSERT(!uvmpdpol_pageisqueued_p(pg));
   1011  1.93.4.2.4.3      matt 			uvm_pagefree(pg);
   1012  1.93.4.2.4.6      matt 			freed++;
   1013           1.8       mrg 
   1014           1.8       mrg 			/*
   1015          1.73      yamt 			 * for anons, we need to remove the page
   1016          1.73      yamt 			 * from the anon ourselves.  for aobjs,
   1017          1.73      yamt 			 * pagefree did that for us.
   1018           1.8       mrg 			 */
   1019          1.24       chs 
   1020          1.73      yamt 			if (anon) {
   1021          1.73      yamt 				KASSERT(anon->an_swslot != 0);
   1022          1.73      yamt 				anon->an_page = NULL;
   1023          1.73      yamt 				slot = anon->an_swslot;
   1024          1.73      yamt 			} else {
   1025          1.73      yamt 				slot = uao_find_swslot(uobj, pageidx);
   1026           1.8       mrg 			}
   1027          1.89        ad 			mutex_exit(slock);
   1028           1.8       mrg 
   1029          1.73      yamt 			if (slot > 0) {
   1030          1.73      yamt 				/* this page is now only in swap. */
   1031          1.87        ad 				mutex_enter(&uvm_swap_data_lock);
   1032          1.73      yamt 				KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
   1033          1.73      yamt 				uvmexp.swpgonly++;
   1034          1.87        ad 				mutex_exit(&uvm_swap_data_lock);
   1035          1.37       chs 			}
   1036          1.73      yamt 			continue;
   1037          1.73      yamt 		}
   1038          1.37       chs 
   1039          1.77      yamt #if defined(VMSWAP)
   1040          1.73      yamt 		/*
    1041          1.73      yamt 		 * this page is dirty; skip it if we'll have met our
   1042          1.73      yamt 		 * free target when all the current pageouts complete.
   1043          1.73      yamt 		 */
   1044          1.24       chs 
   1045  1.93.4.2.4.3      matt 		if (grp->pgrp_free + grp->pgrp_paging > grp->pgrp_freetarg << 2) {
   1046          1.89        ad 			mutex_exit(slock);
   1047          1.73      yamt 			continue;
   1048          1.73      yamt 		}
   1049          1.14       chs 
   1050          1.73      yamt 		/*
   1051          1.73      yamt 		 * free any swap space allocated to the page since
   1052          1.73      yamt 		 * we'll have to write it again with its new data.
   1053          1.73      yamt 		 */
   1054          1.24       chs 
   1055  1.93.4.2.4.3      matt 		uvmpd_dropswap(pg);
   1056          1.14       chs 
   1057          1.73      yamt 		/*
   1058          1.73      yamt 		 * start new swap pageout cluster (if necessary).
   1059      1.93.4.1       snj 		 *
   1060      1.93.4.1       snj 		 * if swap is full reactivate this page so that
   1061      1.93.4.1       snj 		 * we eventually cycle all pages through the
   1062      1.93.4.1       snj 		 * inactive queue.
   1063           1.8       mrg 		 */
   1064          1.24       chs 
   1065          1.73      yamt 		if (swapcluster_allocslots(&swc)) {
   1066      1.93.4.1       snj 			dirtyreacts++;
   1067  1.93.4.2.4.3      matt 			uvm_pageactivate(pg);
   1068          1.89        ad 			mutex_exit(slock);
   1069          1.73      yamt 			continue;
   1070           1.8       mrg 		}
   1071           1.8       mrg 
   1072           1.8       mrg 		/*
    1073          1.73      yamt 		 * at this point, we're definitely going to reuse this
   1074          1.73      yamt 		 * page.  mark the page busy and delayed-free.
   1075          1.73      yamt 		 * we should remove the page from the page queues
   1076          1.73      yamt 		 * so we don't ever look at it again.
   1077          1.73      yamt 		 * adjust counters and such.
   1078           1.8       mrg 		 */
   1079           1.8       mrg 
   1080  1.93.4.2.4.3      matt 		pg->flags |= PG_BUSY;
   1081  1.93.4.2.4.8      matt 		UVM_PAGE_OWN(pg, "scan_queue", NULL);
   1082          1.73      yamt 
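                                 		/*
                                 		 * PG_PAGEOUT marks the page for delayed free: the
                                 		 * pageout completion path frees it once the swap
                                 		 * write finishes instead of unbusying it.
                                 		 */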
   1083  1.93.4.2.4.3      matt 		pg->flags |= PG_PAGEOUT;
   1084  1.93.4.2.4.3      matt 		uvm_pagedequeue(pg);
   1085          1.73      yamt 
   1086  1.93.4.2.4.3      matt 		grp->pgrp_pgswapout++;
   1087          1.89        ad 		mutex_exit(&uvm_pageqlock);
   1088           1.8       mrg 
   1089           1.8       mrg 		/*
   1090          1.73      yamt 		 * add the new page to the cluster.
   1091           1.8       mrg 		 */
   1092           1.8       mrg 
   1093  1.93.4.2.4.3      matt 		if (swapcluster_add(&swc, pg)) {
   1094  1.93.4.2.4.3      matt 			pg->flags &= ~(PG_BUSY|PG_PAGEOUT);
   1095  1.93.4.2.4.8      matt 			UVM_PAGE_OWN(pg, NULL, NULL);
   1096          1.89        ad 			mutex_enter(&uvm_pageqlock);
   1097          1.77      yamt 			dirtyreacts++;
   1098  1.93.4.2.4.3      matt 			uvm_pageactivate(pg);
   1099          1.89        ad 			mutex_exit(slock);
   1100          1.73      yamt 			continue;
   1101          1.73      yamt 		}
   1102          1.89        ad 		mutex_exit(slock);
   1103          1.73      yamt 
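                                 		/*
                                 		 * the 'false' argument lets swapcluster_flush()
                                 		 * defer the I/O until the cluster fills; the final
                                 		 * flush at the end of the scan forces out any
                                 		 * partial cluster.
                                 		 */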
   1104  1.93.4.2.4.3      matt 		swapcluster_flush(grp, &swc, false);
   1105          1.89        ad 		mutex_enter(&uvm_pageqlock);
   1106          1.73      yamt 
   1107           1.8       mrg 		/*
   1108          1.31       chs 		 * the pageout is in progress.  bump counters and set up
   1109          1.31       chs 		 * for the next loop.
   1110           1.8       mrg 		 */
   1111           1.8       mrg 
   1112          1.31       chs 		uvmexp.pdpending++;
   1113          1.77      yamt #else /* defined(VMSWAP) */
   1114  1.93.4.2.4.3      matt 		uvm_pageactivate(pg);
   1115          1.89        ad 		mutex_exit(slock);
   1116          1.77      yamt #endif /* defined(VMSWAP) */
   1117          1.73      yamt 	}
   1118          1.73      yamt 
   1119  1.93.4.2.4.6      matt 	UVMHIST_LOG(pdhist,"  [%zd] <-- done: %u victims: %u freed, %u busy",
   1120  1.93.4.2.4.6      matt 	    grp - uvm.pggroups, victims, freed, busy);
   1121  1.93.4.2.4.6      matt 
   1122  1.93.4.2.4.6      matt 	grp->pgrp_pdvictims += victims;
   1123  1.93.4.2.4.6      matt 	grp->pgrp_pdnullscans += (victims == 0);
   1124  1.93.4.2.4.6      matt 	grp->pgrp_pdfreed += freed;
   1125  1.93.4.2.4.6      matt 	grp->pgrp_pdbusy += busy;
   1126  1.93.4.2.4.6      matt 
   1127          1.73      yamt #if defined(VMSWAP)
   1128          1.89        ad 	mutex_exit(&uvm_pageqlock);
   1129  1.93.4.2.4.3      matt 	swapcluster_flush(grp, &swc, true);
   1130          1.89        ad 	mutex_enter(&uvm_pageqlock);
   1131          1.68      yamt #endif /* defined(VMSWAP) */
   1132           1.1       mrg }
   1133           1.1       mrg 
   1134           1.1       mrg /*
   1135           1.1       mrg  * uvmpd_scan: scan the page queues and attempt to meet our targets.
   1136           1.1       mrg  *
   1137           1.1       mrg  * => called with pageq's locked
   1138           1.1       mrg  */
   1139           1.1       mrg 
   1140          1.65   thorpej static void
   1141  1.93.4.2.4.3      matt uvmpd_scan(struct uvm_pggroup *grp)
   1142           1.1       mrg {
   1143  1.93.4.2.4.3      matt 	u_int swap_shortage, pages_freed;
   1144           1.8       mrg 	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);
   1145           1.1       mrg 
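                                 	/* count one more pagedaemon revolution for this page group. */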
   1146  1.93.4.2.4.3      matt 	grp->pgrp_pdrevs++;
   1147           1.1       mrg 
   1148           1.8       mrg 	/*
   1149          1.93        ad 	 * work on meeting our targets.   first we work on our free target
   1150          1.93        ad 	 * by converting inactive pages into free pages.  then we work on
   1151          1.93        ad 	 * meeting our inactive target by converting active pages to
   1152          1.93        ad 	 * inactive ones.
   1153           1.8       mrg 	 */
   1154           1.8       mrg 
   1155           1.8       mrg 	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);
   1156           1.8       mrg 
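                                 	/*
                                 	 * sample the per-group freed counter around the queue scan
                                 	 * to learn how many pages this pass actually freed.
                                 	 */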
   1157  1.93.4.2.4.3      matt 	pages_freed = grp->pgrp_pdfreed;
   1158  1.93.4.2.4.3      matt 	uvmpd_scan_queue(grp);
   1159  1.93.4.2.4.3      matt 	pages_freed = grp->pgrp_pdfreed - pages_freed;
   1160           1.8       mrg 
   1161           1.8       mrg 	/*
   1162          1.14       chs 	 * detect if we're not going to be able to page anything out
   1163          1.14       chs 	 * until we free some swap resources from active pages.
   1164          1.14       chs 	 */
   1165          1.24       chs 
   1166          1.14       chs 	swap_shortage = 0;
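                                 	/*
                                 	 * that is the case when free pages are below the target,
                                 	 * every available swap slot is already allocated, swap is
                                 	 * not yet full of swap-only pages (so freeing slots held
                                 	 * by in-core pages can still help), and the queue scan
                                 	 * above freed nothing.
                                 	 */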
   1167  1.93.4.2.4.3      matt 	if (grp->pgrp_free < grp->pgrp_freetarg &&
   1168          1.52        pk 	    uvmexp.swpginuse >= uvmexp.swpgavail &&
   1169          1.52        pk 	    !uvm_swapisfull() &&
   1170          1.14       chs 	    pages_freed == 0) {
   1171  1.93.4.2.4.3      matt 		swap_shortage = grp->pgrp_freetarg - grp->pgrp_free;
   1172          1.14       chs 	}
   1173          1.24       chs 
   1174  1.93.4.2.4.3      matt 	uvmpdpol_balancequeue(grp, swap_shortage);
   1175          1.93        ad 
   1176          1.93        ad 	/*
   1177          1.93        ad 	 * swap out some processes if we are still below the minimum
   1178          1.93        ad 	 * free target.  we need to unlock the page queues for this.
   1179          1.93        ad 	 */
   1180          1.93        ad 
   1181  1.93.4.2.4.3      matt 	if (grp->pgrp_free < grp->pgrp_freemin
   1182  1.93.4.2.4.3      matt 	    && uvmexp.nswapdev != 0 && uvm.swapout_enabled) {
   1183  1.93.4.2.4.3      matt 		grp->pgrp_pdswout++;
    1184          1.93        ad 		UVMHIST_LOG(pdhist,"  free %d < min %d: swapout",
    1185          1.93        ad 		    grp->pgrp_free, grp->pgrp_freemin, 0, 0);
   1186          1.93        ad 		mutex_exit(&uvm_pageqlock);
   1187          1.93        ad 		uvm_swapout_threads();
   1188          1.93        ad 		mutex_enter(&uvm_pageqlock);
   1189          1.93        ad 
   1190          1.93        ad 	}
   1191           1.1       mrg }
   1192          1.62      yamt 
   1193          1.62      yamt /*
   1194          1.62      yamt  * uvm_reclaimable: decide whether to wait for pagedaemon.
   1195          1.62      yamt  *
    1196          1.84   thorpej  * => return true if it seems worthwhile to do uvm_wait.
   1197          1.62      yamt  *
   1198          1.62      yamt  * XXX should be tunable.
   1199          1.62      yamt  * XXX should consider pools, etc?
   1200          1.62      yamt  */
   1201          1.62      yamt 
   1202          1.83   thorpej bool
   1203          1.62      yamt uvm_reclaimable(void)
   1204          1.62      yamt {
   1205          1.62      yamt 	int filepages;
   1206          1.77      yamt 	int active, inactive;
   1207          1.62      yamt 
   1208          1.62      yamt 	/*
   1209          1.62      yamt 	 * if swap is not full, no problem.
   1210          1.62      yamt 	 */
   1211          1.62      yamt 
   1212          1.62      yamt 	if (!uvm_swapisfull()) {
   1213          1.84   thorpej 		return true;
   1214          1.62      yamt 	}
   1215          1.62      yamt 
   1216          1.62      yamt 	/*
   1217          1.62      yamt 	 * file-backed pages can be reclaimed even when swap is full.
    1218          1.62      yamt 	 * if they amount to at least 1/16 of pageable memory or 5MB,
                                 	 * whichever is smaller, try to reclaim.
   1219          1.62      yamt 	 *
   1220          1.62      yamt 	 * XXX assume the worst case, ie. all wired pages are file-backed.
   1221          1.63      yamt 	 *
    1222          1.63      yamt 	 * XXX should consider other reclaimable memory.
   1223          1.63      yamt 	 * XXX ie. pools, traditional buffer cache.
   1224          1.62      yamt 	 */
   1225          1.62      yamt 
   1226          1.62      yamt 	filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
   1227          1.77      yamt 	uvm_estimatepageable(&active, &inactive);
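                                 	/*
                                 	 * (active + inactive) >> 4 is 1/16 of the pageable pages;
                                 	 * 5 * 1024 * 1024 >> PAGE_SHIFT converts 5MB into pages.
                                 	 */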
   1228          1.77      yamt 	if (filepages >= MIN((active + inactive) >> 4,
   1229          1.62      yamt 	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
   1230          1.84   thorpej 		return true;
   1231          1.62      yamt 	}
   1232          1.62      yamt 
   1233          1.62      yamt 	/*
    1234          1.62      yamt 	 * kill the process, fail the allocation, etc.
   1235          1.62      yamt 	 */
   1236          1.62      yamt 
   1237          1.84   thorpej 	return false;
   1238          1.62      yamt }
   1239          1.77      yamt 
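                                 /*
                                  * uvm_estimatepageable: estimate the numbers of active and
                                  * inactive pageable pages by asking the page replacement policy.
                                  */
                                 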
   1240          1.77      yamt void
   1241  1.93.4.2.4.3      matt uvm_estimatepageable(u_int *active, u_int *inactive)
   1242          1.77      yamt {
   1243          1.77      yamt 
   1244          1.77      yamt 	uvmpdpol_estimatepageable(active, inactive);
   1245          1.77      yamt }
   1246