/*	$NetBSD: uvm_pdaemon.c,v 1.93.4.2.4.3 2012/02/09 03:05:01 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.93.4.2.4.3 2012/02/09 03:05:01 matt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define	UVMPD_NUMDIRTYREACTS	16

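/*
 * UVMPD_NUMTRYLOCKOWNER is how many consecutive page-owner trylock
 * failures uvmpd_scan_queue() tolerates before pausing briefly
 * (via kpause) to let the lock-holding LWP run; see the livelock
 * handling in uvmpd_scan_queue() below.
 */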
#define	UVMPD_NUMTRYLOCKOWNER	16

/*
 * local prototypes
 */

static void	uvmpd_scan(struct uvm_pggroup *);
static void	uvmpd_scan_queue(struct uvm_pggroup *);
static void	uvmpd_tune(void);

static struct uvm_pdinfo {
	unsigned int pd_waiters;
	unsigned int pd_scans_neededs;
	struct uvm_pggrouplist pd_pagingq;
	struct uvm_pggrouplist pd_pendingq;
} uvm_pdinfo = {
	.pd_pagingq = TAILQ_HEAD_INITIALIZER(uvm_pdinfo.pd_pagingq),
	.pd_pendingq = TAILQ_HEAD_INITIALIZER(uvm_pdinfo.pd_pendingq),
};
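
/*
 * A note on the two queues above (a summary of uvm_kick_pdaemon(),
 * uvm_pageout_start() and uvm_pageout_done() below): a page group
 * that needs scanning but has no pageouts in flight sits on
 * pd_pendingq; once uvm_pageout_start() begins a pageout for it,
 * it moves to pd_pagingq; when uvm_pageout_done() retires the last
 * in-flight page, it moves back to pd_pendingq if a scan is still
 * needed.
 */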

/*
 * XXX hack to avoid hangs when large processes fork.
 */
u_int uvm_extrapages;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	int timo = 0;

	mutex_spin_enter(&uvm_fpageqlock);

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curlwp == uvm.pagedaemon_lwp && uvmexp.paging == 0) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	uvm_pdinfo.pd_waiters++;
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm_fpageqlock, false, wmsg, timo);
}
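
/*
 * typical caller pattern, a sketch (uobj, off and the "examplewt" wait
 * message are hypothetical, not from this file): retry a page
 * allocation after waiting for the pagedaemon, with no locks held.
 *
 *	struct vm_page *pg;
 *
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, 0)) == NULL)
 *		uvm_wait("examplewt");
 */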

/*
 * uvm_kick_pdaemon: perform checks to determine if we need to
 * give the pagedaemon a nudge, and do so if necessary.
 *
 * => called with uvm_fpageqlock held.
 */

void
uvm_kick_pdaemon(void)
{
	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;
	bool need_wakeup = false;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);

	KASSERT(mutex_owned(&uvm_fpageqlock));

	struct uvm_pggroup *grp;
	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		const bool prev_scan_needed = grp->pgrp_scan_needed;

		KASSERT(grp->pgrp_npages > 0);

		grp->pgrp_scan_needed =
		    grp->pgrp_free + grp->pgrp_paging < grp->pgrp_freemin
		    || (grp->pgrp_free + grp->pgrp_paging < grp->pgrp_freetarg
			&& uvmpdpol_needsscan_p(grp));

		if (prev_scan_needed != grp->pgrp_scan_needed) {
			UVMHIST_LOG(pdhist, " [%zd] %d->%d (scan=%d)",
			    grp - uvm.pggroups, prev_scan_needed,
			    grp->pgrp_scan_needed, uvmpdpol_needsscan_p(grp));
			UVMHIST_LOG(pdhist, " [%zd] %d < min(%d,%d)",
			    grp - uvm.pggroups,
			    grp->pgrp_free + grp->pgrp_paging,
			    grp->pgrp_freemin, grp->pgrp_freetarg);
		}

		if (grp->pgrp_paging == 0
		    && prev_scan_needed != grp->pgrp_scan_needed) {
			if (grp->pgrp_scan_needed) {
				TAILQ_INSERT_TAIL(&pdinfo->pd_pendingq,
				    grp, pgrp_pd_link);
				need_wakeup = true;
			} else {
				TAILQ_REMOVE(&pdinfo->pd_pendingq,
				    grp, pgrp_pd_link);
			}
		}
	}

	if (need_wakeup)
		wakeup(&uvm.pagedaemon);

	UVMHIST_LOG(pdhist, " <- done: wakeup=%d!", need_wakeup, 0, 0, 0);
}

/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune(void)
{
	u_int extrapages = atomic_swap_uint(&uvm_extrapages, 0) / uvmexp.ncolors;
	u_int freemin = 0;
	u_int freetarg = 0;
	u_int wiredmax = 0;

	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	extrapages = roundup(extrapages, uvmexp.npggroups);

	struct uvm_pggroup *grp;
	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		KASSERT(grp->pgrp_npages > 0);

		/*
		 * try to keep 0.5% of available RAM free, but limit
		 * to between 128k and 1024k per-CPU.
		 * XXX: what are these values good for?
		 */
		u_int val = grp->pgrp_npages / 200;
		val = MAX(val, (128*1024) >> PAGE_SHIFT);
		val = MIN(val, (1024*1024) >> PAGE_SHIFT);
		val *= ncpu;

		/* Make sure there's always a user page free. */
		if (val * uvmexp.npggroups <= uvmexp.reserve_kernel)
			val = uvmexp.reserve_kernel / uvmexp.npggroups + 1;

		grp->pgrp_freemin = val;

		/* Calculate freetarg. */
		val = (grp->pgrp_freemin * 4) / 3;
		if (val <= grp->pgrp_freemin)
			val = grp->pgrp_freemin + 1;
		grp->pgrp_freetarg = val + extrapages / uvmexp.npggroups;
		if (grp->pgrp_freetarg > grp->pgrp_npages / 2)
			grp->pgrp_freetarg = grp->pgrp_npages / 2;

		grp->pgrp_wiredmax = grp->pgrp_npages / 3;
		UVMHIST_LOG(pdhist,
		    "[%zd]: freemin=%d, freetarg=%d, wiredmax=%d",
		    grp - uvm.pggroups, grp->pgrp_freemin, grp->pgrp_freetarg,
		    grp->pgrp_wiredmax);

		freemin += grp->pgrp_freemin;
		freetarg += grp->pgrp_freetarg;
		wiredmax += grp->pgrp_wiredmax;
	}

	uvmexp.freemin = freemin;
	uvmexp.freetarg = freetarg;
	uvmexp.wiredmax = wiredmax;

	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	    uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}
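
/*
 * worked example of the tuning math above (assuming 4KiB pages and
 * ncpu == 2): for a group of 262144 pages (1GiB), npages / 200 = 1310,
 * which the MIN() clamp reduces to (1024*1024) >> PAGE_SHIFT = 256
 * pages; times ncpu gives pgrp_freemin = 512 pages (2MiB), and
 * pgrp_freetarg = 512 * 4 / 3 = 682 pages plus the group's share of
 * any extra pages.
 */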

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	u_int npages = 0;
	u_int extrapages = 0;
	u_int npggroups = 0;
	struct pool *pp;
	uint64_t where;
	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;
	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_lwp = curlwp;
	mutex_enter(&uvm_pageqlock);
	npages = uvmexp.npages;
	uvmpd_tune();
	mutex_exit(&uvm_pageqlock);

	/*
	 * main loop
	 */

	for (;;) {
		struct uvm_pggroup *grp;
		bool need_free = false;
		u_int bufcnt = 0;

		mutex_spin_enter(&uvm_fpageqlock);
		/*
		 * If we have no one waiting or all color requests have
		 * active paging, then wait.
		 */
		if (pdinfo->pd_waiters == 0
		    || TAILQ_FIRST(&pdinfo->pd_pendingq) == NULL) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
			    &uvm_fpageqlock, false, "pgdaemon", 0);
			uvmexp.pdwoke++;
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);
		} else {
			mutex_spin_exit(&uvm_fpageqlock);
		}

		/*
		 * now lock page queues and recompute inactive count
		 */

		mutex_enter(&uvm_pageqlock);
		mutex_spin_enter(&uvm_fpageqlock);

		if (npages != uvmexp.npages
		    || extrapages != uvm_extrapages
		    || npggroups != uvmexp.npggroups) {
			npages = uvmexp.npages;
			extrapages = uvm_extrapages;
			npggroups = uvmexp.npggroups;
			uvmpd_tune();
		}

		/*
		 * Estimate a hint.  Note that bufmem is returned to the
		 * system only when an entire pool page is empty.
		 */
		bool need_wakeup = false;
		while ((grp = TAILQ_FIRST(&pdinfo->pd_pendingq)) != NULL) {
			KASSERT(grp->pgrp_npages > 0);

			uvmpdpol_tune(grp);

			int diff = grp->pgrp_freetarg - grp->pgrp_free;
			if (diff < 0)
				diff = 0;

			bufcnt += diff;

			UVMHIST_LOG(pdhist," [%zu]: "
			    "free/ftarg/fmin=%u/%u/%u",
			    grp - uvm.pggroups, grp->pgrp_free,
			    grp->pgrp_freetarg, grp->pgrp_freemin);

			if (grp->pgrp_paging < diff)
				need_free = true;

			/*
			 * scan if needed
			 */
			if (grp->pgrp_paging < diff
			    || uvmpdpol_needsscan_p(grp)) {
				mutex_spin_exit(&uvm_fpageqlock);
				uvmpd_scan(grp);
				mutex_spin_enter(&uvm_fpageqlock);
			} else {
				UVMHIST_LOG(pdhist,
				    " [%zu]: diff/paging=%u/%u: "
				    "scan skipped",
				    grp - uvm.pggroups, diff,
				    grp->pgrp_paging, 0);
			}

			/*
			 * if there's any free memory to be had,
			 * wake up any waiters.
			 */
			if (grp->pgrp_free * uvmexp.npggroups > uvmexp.reserve_kernel
			    || grp->pgrp_paging == 0) {
				need_wakeup = true;
			}

			/*
			 * We are done, remove it from the queue.
			 */
			TAILQ_REMOVE(&pdinfo->pd_pendingq, grp, pgrp_pd_link);
			grp->pgrp_scan_needed = false;
		}
		if (need_wakeup) {
			pdinfo->pd_waiters = 0;
			wakeup(&uvmexp.free);
		}
		KASSERT(!need_free || need_wakeup);
		mutex_spin_exit(&uvm_fpageqlock);

		/*
		 * scan done.  unlock page queues (the only lock
		 * we are holding)
		 */
		mutex_exit(&uvm_pageqlock);

		/*
		 * if we don't need free memory, we're done.
		 */

		if (!need_free)
			continue;

		/*
		 * start draining pool resources now that we're not
		 * holding any locks.
		 */
		pool_drain_start(&pp, &where);

		/*
		 * kill unused metadata buffers.
		 */
		if (bufcnt > 0) {
			mutex_enter(&bufcache_lock);
			buf_drain(bufcnt << PAGE_SHIFT);
			mutex_exit(&bufcache_lock);
		}

		/*
		 * complete draining the pools.
		 */
		pool_drain_end(pp, where);
	}
	/*NOTREACHED*/
}

/*
 * uvm_aiodone_worker: a workqueue callback for the aiodone daemon.
 */

void
uvm_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (void *)wk;

	KASSERT(&bp->b_work == wk);

	/*
	 * process an i/o that's done.
	 */

	(*bp->b_iodone)(bp);
}

void
uvm_pageout_start(struct uvm_pggroup *grp, u_int npages)
{
	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;

	mutex_spin_enter(&uvm_fpageqlock);

	uvmexp.paging += npages;
	if (grp->pgrp_paging == 0) {
		KASSERT(grp->pgrp_scan_needed);
		TAILQ_REMOVE(&pdinfo->pd_pendingq, grp, pgrp_pd_link);
		TAILQ_INSERT_TAIL(&pdinfo->pd_pagingq, grp, pgrp_pd_link);
	}
	grp->pgrp_paging += npages;
	mutex_spin_exit(&uvm_fpageqlock);
}

void
uvm_pageout_done(struct vm_page *pg, bool freed)
{
	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;

	KASSERT(pg->flags & PG_PAGEOUT);

	mutex_spin_enter(&uvm_fpageqlock);
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);

	KASSERT(grp->pgrp_paging > 0);
	if (--grp->pgrp_paging == 0) {
		TAILQ_REMOVE(&pdinfo->pd_pagingq, grp, pgrp_pd_link);
		if (grp->pgrp_scan_needed) {
			TAILQ_INSERT_TAIL(&pdinfo->pd_pendingq, grp, pgrp_pd_link);
		}
	}

	KASSERT(uvmexp.paging > 0);
	uvmexp.paging--;
	grp->pgrp_pdfreed += freed;

	/*
	 * wake up either the pagedaemon or the LWPs waiting for it.
	 */
	if (grp->pgrp_free * uvmexp.npggroups <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else {
		pdinfo->pd_waiters = 0;
		wakeup(&uvmexp.free);
	}

	mutex_spin_exit(&uvm_fpageqlock);
}

/*
 * uvmpd_trylockowner: trylock the page's owner.
 *
 * => called with pageq locked.
 * => resolve orphaned O->A loaned page.
 * => return the locked mutex on success.  otherwise, return NULL.
 */

kmutex_t *
uvmpd_trylockowner(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;
	kmutex_t *slock;

	KASSERT(mutex_owned(&uvm_pageqlock));

	if (uobj != NULL) {
		slock = &uobj->vmobjlock;
	} else {
		struct vm_anon *anon = pg->uanon;

		KASSERT(anon != NULL);
		slock = &anon->an_lock;
	}

	if (!mutex_tryenter(slock)) {
		return NULL;
	}

	if (uobj == NULL) {

		/*
		 * set PQ_ANON if it isn't set already.
		 */

		if ((pg->pqflags & PQ_ANON) == 0) {
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->pqflags |= PQ_ANON;
			/* anon now owns it */
		}
	}

	return slock;
}
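
/*
 * typical use of uvmpd_trylockowner() (compare uvmpd_trydropswap()
 * below): take the owner's lock opportunistically and skip the page
 * when that fails, rather than risking a pageq -> owner deadlock.
 *
 *	kmutex_t *slock = uvmpd_trylockowner(pg);
 *	if (slock == NULL)
 *		continue;	(owner is busy; revisit the page later)
 *	... examine or modify pg under the owner's lock ...
 *	mutex_exit(slock);
 */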

#if defined(VMSWAP)
struct swapcluster {
	int swc_slot;
	int swc_nallocated;
	int swc_nused;
	struct vm_page *swc_pages[howmany(MAXPHYS, MIN_PAGE_SIZE)];
};

static void
swapcluster_init(struct swapcluster *swc)
{

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

static int
swapcluster_allocslots(struct swapcluster *swc)
{
	int slot;
	int npages;

	if (swc->swc_slot != 0) {
		return 0;
	}

	/* Even with strange MAXPHYS, the shift
	   implicitly rounds down to a page. */
	npages = MAXPHYS >> PAGE_SHIFT;
	slot = uvm_swap_alloc(&npages, true);
	if (slot == 0) {
		return ENOMEM;
	}
	swc->swc_slot = slot;
	swc->swc_nallocated = npages;
	swc->swc_nused = 0;

	return 0;
}

static int
swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
{
	int slot;
	struct uvm_object *uobj;

	KASSERT(swc->swc_slot != 0);
	KASSERT(swc->swc_nused < swc->swc_nallocated);
	KASSERT((pg->pqflags & PQ_SWAPBACKED) != 0);

	slot = swc->swc_slot + swc->swc_nused;
	uobj = pg->uobject;
	if (uobj == NULL) {
		KASSERT(mutex_owned(&pg->uanon->an_lock));
		pg->uanon->an_swslot = slot;
	} else {
		int result;

		KASSERT(mutex_owned(&uobj->vmobjlock));
		result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
		if (result == -1) {
			return ENOMEM;
		}
	}
	swc->swc_pages[swc->swc_nused] = pg;
	swc->swc_nused++;

	return 0;
}

static void
swapcluster_flush(struct uvm_pggroup *grp, struct swapcluster *swc, bool now)
{
	int slot;
	u_int nused;
	int nallocated;
	int error;

	if (swc->swc_slot == 0) {
		return;
	}
	KASSERT(swc->swc_nused <= swc->swc_nallocated);

	slot = swc->swc_slot;
	nused = swc->swc_nused;
	nallocated = swc->swc_nallocated;

	/*
	 * if this is the final pageout we could have a few
	 * unused swap blocks.  if so, free them now.
	 */

	if (nused < nallocated) {
		if (!now) {
			return;
		}
		uvm_swap_free(slot + nused, nallocated - nused);
	}

	/*
	 * now start the pageout.
	 */

	if (nused > 0) {
		grp->pgrp_pdpageouts++;
		uvmexp.pdpageouts++;	/* procfs */
		uvm_pageout_start(grp, nused);
		error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
		KASSERT(error == 0 || error == ENOMEM);
	}

	/*
	 * zero swslot to indicate that we are
	 * no longer building a swap-backed cluster.
	 */

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

static int
swapcluster_nused(struct swapcluster *swc)
{

	return swc->swc_nused;
}
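
/*
 * swapcluster life cycle, as driven by uvmpd_scan_queue() below
 * (a summary, not an additional API):
 *
 *	swapcluster_init(&swc);
 *	for each dirty swap-backed victim page:
 *		swapcluster_allocslots(&swc);	(reserve swap if needed)
 *		swapcluster_add(&swc, pg);	(bind pg to a slot)
 *		swapcluster_flush(grp, &swc, false); (writes full clusters)
 *	swapcluster_flush(grp, &swc, true);	(final, partial flush)
 */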

/*
 * uvmpd_dropswap: free any swap allocated to this page.
 *
 * => called with owner locked.
 * => return true if a page had an associated slot.
 */

static bool
uvmpd_dropswap(struct vm_page *pg)
{
	bool result = false;
	struct vm_anon *anon = pg->uanon;

	if ((pg->pqflags & PQ_ANON) && anon->an_swslot) {
		uvm_swap_free(anon->an_swslot, 1);
		anon->an_swslot = 0;
		pg->flags &= ~PG_CLEAN;
		result = true;
	} else if (pg->pqflags & PQ_AOBJ) {
		int slot = uao_set_swslot(pg->uobject,
		    pg->offset >> PAGE_SHIFT, 0);
		if (slot) {
			uvm_swap_free(slot, 1);
			pg->flags &= ~PG_CLEAN;
			result = true;
		}
	}

	return result;
}

/*
 * uvmpd_trydropswap: try to free any swap allocated to this page.
 *
 * => return true if a slot is successfully freed.
 */

bool
uvmpd_trydropswap(struct vm_page *pg)
{
	kmutex_t *slock;
	bool result;

	if ((pg->flags & PG_BUSY) != 0) {
		return false;
	}

	/*
	 * lock the page's owner.
	 */

	slock = uvmpd_trylockowner(pg);
	if (slock == NULL) {
		return false;
	}

	/*
	 * skip this page if it's busy.
	 */

	if ((pg->flags & PG_BUSY) != 0) {
		mutex_exit(slock);
		return false;
	}

	result = uvmpd_dropswap(pg);

	mutex_exit(slock);

	return result;
}

#endif /* defined(VMSWAP) */

/*
 * uvmpd_scan_queue: scan a replace-candidate list for pages
 * to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 */

static void
uvmpd_scan_queue(struct uvm_pggroup *grp)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	struct vm_anon *anon;
#if defined(VMSWAP)
	struct swapcluster swc;
#endif /* defined(VMSWAP) */
	int dirtyreacts;
	int lockownerfail;
	kmutex_t *slock;
	UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);

	/*
	 * swc.swc_slot is non-zero while we are building a swap cluster.
	 * we want to stay in the loop while we have a page to scan or we
	 * have a swap cluster to build.
	 */

#if defined(VMSWAP)
	swapcluster_init(&swc);
#endif /* defined(VMSWAP) */

	dirtyreacts = 0;
	lockownerfail = 0;
	uvmpdpol_scaninit(grp);

	while (/* CONSTCOND */ 1) {

		/*
		 * see if we've met the free target.
		 */
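
		/*
		 * (note the margin: we keep scanning until free pages plus
		 * pages already being paged out, and, with VMSWAP, pages
		 * staged in the cluster, reach four times the group's free
		 * target, presumably so each pass builds some slack before
		 * the daemon sleeps again.)
		 */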

		if (grp->pgrp_free + grp->pgrp_paging
#if defined(VMSWAP)
		    + swapcluster_nused(&swc)
#endif /* defined(VMSWAP) */
		    >= grp->pgrp_freetarg << 2 ||
		    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
			UVMHIST_LOG(pdhist,"  [%zd]: met free target (%u + %u >= %u): "
			    "exit loop", grp - uvm.pggroups,
			    grp->pgrp_free, grp->pgrp_paging,
			    grp->pgrp_freetarg << 2);
			break;
		}

		pg = uvmpdpol_selectvictim(grp);
		if (pg == NULL) {
			UVMHIST_LOG(pdhist,"  [%zd]: selectvictim didn't: "
			    "exit loop", grp - uvm.pggroups, 0, 0, 0);
			break;
		}
		KASSERT(uvmpdpol_pageisqueued_p(pg));
		KASSERT(pg->wire_count == 0);

		/*
		 * we are below target and have a new page to consider.
		 */

		anon = pg->uanon;
		uobj = pg->uobject;

		/*
		 * first we attempt to lock the object that this page
		 * belongs to.  if our attempt fails we skip on to
		 * the next page (no harm done).  it is important to
		 * "try" locking the object as we are locking in the
		 * wrong order (pageq -> object) and we don't want to
		 * deadlock.
		 *
		 * the only time we expect to see an ownerless page
		 * (i.e. a page with no uobject and !PQ_ANON) is if an
		 * anon has loaned a page from a uvm_object and the
		 * uvm_object has dropped the ownership.  in that
		 * case, the anon can "take over" the loaned page
		 * and make it its own.
		 */

		slock = uvmpd_trylockowner(pg);
		if (slock == NULL) {
			/*
			 * yield the cpu to give the LWP holding the lock
			 * a chance to run.  otherwise we can busy-loop too
			 * long if the page queue is filled with a lot of
			 * pages from few objects.
			 */
			lockownerfail++;
			if (lockownerfail > UVMPD_NUMTRYLOCKOWNER) {
				mutex_exit(&uvm_pageqlock);
				/* XXX Better than yielding but inadequate. */
				kpause("livelock", false, 1, NULL);
				mutex_enter(&uvm_pageqlock);
				lockownerfail = 0;
			}
			continue;
		}
		if (pg->flags & PG_BUSY) {
			mutex_exit(slock);
			grp->pgrp_pdbusy++;
			continue;
		}

		/* does the page belong to an object? */
		if (uobj != NULL) {
			grp->pgrp_pdobscan++;
		} else {
#if defined(VMSWAP)
			KASSERT(anon != NULL);
			grp->pgrp_pdanscan++;
#else /* defined(VMSWAP) */
			panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
		}

		/*
		 * we now have the object and the page queues locked.
		 * if the page is not swap-backed, call the object's
		 * pager to flush and free the page.
		 */

#if defined(READAHEAD_STATS)
		if ((pg->pqflags & PQ_READAHEAD) != 0) {
			pg->pqflags &= ~PQ_READAHEAD;
			uvm_ra_miss.ev_count++;
		}
#endif /* defined(READAHEAD_STATS) */

		if ((pg->pqflags & PQ_SWAPBACKED) == 0) {
			KASSERT(uobj != NULL);
			mutex_exit(&uvm_pageqlock);
			(void) (uobj->pgops->pgo_put)(uobj, pg->offset,
			    pg->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
			mutex_enter(&uvm_pageqlock);
			continue;
		}

		/*
		 * the page is swap-backed.  remove all the permissions
		 * from the page so we can sync the modified info
		 * without any race conditions.  if the page is clean
		 * we can free it now and continue.
		 */

		pmap_page_protect(pg, VM_PROT_NONE);
		if ((pg->flags & PG_CLEAN) && pmap_clear_modify(pg)) {
			pg->flags &= ~(PG_CLEAN);
		}
		if (pg->flags & PG_CLEAN) {
			int slot;
			int pageidx;

			pageidx = pg->offset >> PAGE_SHIFT;
			KASSERT(!uvmpdpol_pageisqueued_p(pg));
			uvm_pagefree(pg);
			grp->pgrp_pdfreed++;

			/*
			 * for anons, we need to remove the page
			 * from the anon ourselves.  for aobjs,
			 * pagefree did that for us.
			 */

			if (anon) {
				KASSERT(anon->an_swslot != 0);
				anon->an_page = NULL;
				slot = anon->an_swslot;
			} else {
				slot = uao_find_swslot(uobj, pageidx);
			}
			mutex_exit(slock);

			if (slot > 0) {
				/* this page is now only in swap. */
				mutex_enter(&uvm_swap_data_lock);
				KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
				uvmexp.swpgonly++;
				mutex_exit(&uvm_swap_data_lock);
			}
			continue;
		}

#if defined(VMSWAP)
		/*
		 * this page is dirty, skip it if we'll have met our
		 * free target when all the current pageouts complete.
		 */

		if (grp->pgrp_free + grp->pgrp_paging > grp->pgrp_freetarg << 2) {
			mutex_exit(slock);
			continue;
		}

		/*
		 * free any swap space allocated to the page since
		 * we'll have to write it again with its new data.
		 */

		uvmpd_dropswap(pg);

		/*
		 * start new swap pageout cluster (if necessary).
		 *
		 * if swap is full reactivate this page so that
		 * we eventually cycle all pages through the
		 * inactive queue.
		 */

		if (swapcluster_allocslots(&swc)) {
			dirtyreacts++;
			uvm_pageactivate(pg);
			mutex_exit(slock);
			continue;
		}

		/*
		 * at this point, we're definitely going to reuse this
		 * page.  mark the page busy and delayed-free.
		 * we should remove the page from the page queues
		 * so we don't ever look at it again.
		 * adjust counters and such.
		 */
   1014           1.8       mrg 
   1015  1.93.4.2.4.3      matt 		pg->flags |= PG_BUSY;
   1016  1.93.4.2.4.3      matt 		UVM_PAGE_OWN(pg, "scan_queue");
   1017          1.73      yamt 
   1018  1.93.4.2.4.3      matt 		pg->flags |= PG_PAGEOUT;
   1019  1.93.4.2.4.3      matt 		uvm_pagedequeue(pg);
   1020          1.73      yamt 
   1021  1.93.4.2.4.3      matt 		grp->pgrp_pgswapout++;
   1022          1.89        ad 		mutex_exit(&uvm_pageqlock);
   1023           1.8       mrg 
   1024           1.8       mrg 		/*
   1025          1.73      yamt 		 * add the new page to the cluster.
   1026           1.8       mrg 		 */
   1027           1.8       mrg 
   1028  1.93.4.2.4.3      matt 		if (swapcluster_add(&swc, pg)) {
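                                 			/*
                                 			 * adding the page to the cluster failed:
                                 			 * back out the pageout state and reactivate
                                 			 * the page so it is looked at again later.
                                 			 */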
   1029  1.93.4.2.4.3      matt 			pg->flags &= ~(PG_BUSY|PG_PAGEOUT);
   1030  1.93.4.2.4.3      matt 			UVM_PAGE_OWN(pg, NULL);
   1031          1.89        ad 			mutex_enter(&uvm_pageqlock);
   1032          1.77      yamt 			dirtyreacts++;
   1033  1.93.4.2.4.3      matt 			uvm_pageactivate(pg);
   1034          1.89        ad 			mutex_exit(slock);
   1035          1.73      yamt 			continue;
   1036          1.73      yamt 		}
   1037          1.89        ad 		mutex_exit(slock);
   1038          1.73      yamt 
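                                 		/*
                                 		 * start i/o on the cluster if it has filled up;
                                 		 * the forced flush after the loop pushes out any
                                 		 * partially filled remainder.
                                 		 */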
   1039  1.93.4.2.4.3      matt 		swapcluster_flush(grp, &swc, false);
   1040          1.89        ad 		mutex_enter(&uvm_pageqlock);
   1041          1.73      yamt 
   1042           1.8       mrg 		/*
   1043          1.31       chs 		 * the pageout is in progress.  bump counters and set up
   1044          1.31       chs 		 * for the next loop.
   1045           1.8       mrg 		 */
   1046           1.8       mrg 
   1047          1.31       chs 		uvmexp.pdpending++;
   1048          1.77      yamt #else /* defined(VMSWAP) */
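                                 		/*
                                 		 * without swap there is nowhere to page this dirty
                                 		 * page out to, so just reactivate it and move on.
                                 		 */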
   1049  1.93.4.2.4.3      matt 		uvm_pageactivate(pg);
   1050          1.89        ad 		mutex_exit(slock);
   1051          1.77      yamt #endif /* defined(VMSWAP) */
   1052          1.73      yamt 	}
   1053          1.73      yamt 
   1054          1.73      yamt #if defined(VMSWAP)
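                                 	/*
                                 	 * the scan is done; force out any partially filled
                                 	 * swap cluster that remains.
                                 	 */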
   1055          1.89        ad 	mutex_exit(&uvm_pageqlock);
   1056  1.93.4.2.4.3      matt 	swapcluster_flush(grp, &swc, true);
   1057          1.89        ad 	mutex_enter(&uvm_pageqlock);
   1058          1.68      yamt #endif /* defined(VMSWAP) */
   1059           1.1       mrg }
   1060           1.1       mrg 
   1061           1.1       mrg /*
   1062           1.1       mrg  * uvmpd_scan: scan the page queues and attempt to meet our targets.
   1063           1.1       mrg  *
   1064           1.1       mrg  * => called with pageq's locked
   1065           1.1       mrg  */
   1066           1.1       mrg 
   1067          1.65   thorpej static void
   1068  1.93.4.2.4.3      matt uvmpd_scan(struct uvm_pggroup *grp)
   1069           1.1       mrg {
   1070  1.93.4.2.4.3      matt 	u_int swap_shortage, pages_freed;
   1071           1.8       mrg 	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);
   1072           1.1       mrg 
   1073  1.93.4.2.4.3      matt 	grp->pgrp_pdrevs++;
   1074           1.1       mrg 
   1075           1.8       mrg 	/*
   1076          1.93        ad 	 * work on meeting our targets.   first we work on our free target
   1077          1.93        ad 	 * by converting inactive pages into free pages.  then we work on
   1078          1.93        ad 	 * meeting our inactive target by converting active pages to
   1079          1.93        ad 	 * inactive ones.
   1080           1.8       mrg 	 */
   1081           1.8       mrg 
   1082           1.8       mrg 	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);
   1083           1.8       mrg 
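                                 	/*
                                 	 * scan the inactive queue, noting how many pages this
                                 	 * pass manages to free.
                                 	 */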
   1084  1.93.4.2.4.3      matt 	pages_freed = grp->pgrp_pdfreed;
   1085  1.93.4.2.4.3      matt 	uvmpd_scan_queue(grp);
   1086  1.93.4.2.4.3      matt 	pages_freed = grp->pgrp_pdfreed - pages_freed;
   1087           1.8       mrg 
   1088           1.8       mrg 	/*
   1089          1.14       chs 	 * detect if we're not going to be able to page anything out
   1090          1.14       chs 	 * until we free some swap resources from active pages.
   1091          1.14       chs 	 */
   1092          1.24       chs 
   1093          1.14       chs 	swap_shortage = 0;
   1094  1.93.4.2.4.3      matt 	if (grp->pgrp_free < grp->pgrp_freetarg &&
   1095          1.52        pk 	    uvmexp.swpginuse >= uvmexp.swpgavail &&
   1096          1.52        pk 	    !uvm_swapisfull() &&
   1097          1.14       chs 	    pages_freed == 0) {
   1098  1.93.4.2.4.3      matt 		swap_shortage = grp->pgrp_freetarg - grp->pgrp_free;
   1099          1.14       chs 	}
   1100          1.24       chs 
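                                 	/*
                                 	 * meet the inactive target by deactivating active pages.
                                 	 * if there is a swap shortage, this also tries to free
                                 	 * swap resources from the active pages it scans.
                                 	 */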
   1101  1.93.4.2.4.3      matt 	uvmpdpol_balancequeue(grp, swap_shortage);
   1102          1.93        ad 
   1103          1.93        ad 	/*
   1104          1.93        ad 	 * swap out some processes if we are still below the minimum
   1105          1.93        ad 	 * free target.  we need to unlock the page queues for this.
   1106          1.93        ad 	 */
   1107          1.93        ad 
   1108  1.93.4.2.4.3      matt 	if (grp->pgrp_free < grp->pgrp_freemin
   1109  1.93.4.2.4.3      matt 	    && uvmexp.nswapdev != 0 && uvm.swapout_enabled) {
   1110  1.93.4.2.4.3      matt 		grp->pgrp_pdswout++;
    1111          1.93        ad 		UVMHIST_LOG(pdhist,"  free %d < min %d: swapout",
    1112          1.93        ad 		    grp->pgrp_free, grp->pgrp_freemin, 0, 0);
   1113          1.93        ad 		mutex_exit(&uvm_pageqlock);
   1114          1.93        ad 		uvm_swapout_threads();
   1115          1.93        ad 		mutex_enter(&uvm_pageqlock);
    1117          1.93        ad 	}
   1118           1.1       mrg }
   1119          1.62      yamt 
   1120          1.62      yamt /*
    1121          1.84   thorpej  * => return true if it seems worthwhile to do uvm_wait.
   1122          1.62      yamt  *
   1123          1.84   thorpej  * => return true if it seems to be worth to do uvm_wait.
   1124          1.62      yamt  *
   1125          1.62      yamt  * XXX should be tunable.
   1126          1.62      yamt  * XXX should consider pools, etc?
   1127          1.62      yamt  */
   1128          1.62      yamt 
   1129          1.83   thorpej bool
   1130          1.62      yamt uvm_reclaimable(void)
   1131          1.62      yamt {
   1132          1.62      yamt 	int filepages;
    1133          1.77      yamt 	u_int active, inactive;
   1134          1.62      yamt 
   1135          1.62      yamt 	/*
   1136          1.62      yamt 	 * if swap is not full, no problem.
   1137          1.62      yamt 	 */
   1138          1.62      yamt 
   1139          1.62      yamt 	if (!uvm_swapisfull()) {
   1140          1.84   thorpej 		return true;
   1141          1.62      yamt 	}
   1142          1.62      yamt 
   1143          1.62      yamt 	/*
   1144          1.62      yamt 	 * file-backed pages can be reclaimed even when swap is full.
    1145          1.62      yamt 	 * if we have more than 1/16 of pageable memory (or 5MB,
                                 	 * whichever is less), try to reclaim.
   1146          1.62      yamt 	 *
   1147          1.62      yamt 	 * XXX assume the worst case, ie. all wired pages are file-backed.
   1148          1.63      yamt 	 *
    1149          1.63      yamt 	 * XXX should consider other reclaimable memory.
   1150          1.63      yamt 	 * XXX ie. pools, traditional buffer cache.
   1151          1.62      yamt 	 */
   1152          1.62      yamt 
   1153          1.62      yamt 	filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
   1154          1.77      yamt 	uvm_estimatepageable(&active, &inactive);
   1155          1.77      yamt 	if (filepages >= MIN((active + inactive) >> 4,
   1156          1.62      yamt 	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
   1157          1.84   thorpej 		return true;
   1158          1.62      yamt 	}
   1159          1.62      yamt 
   1160          1.62      yamt 	/*
    1161          1.62      yamt 	 * kill the process, fail the allocation, etc.
   1162          1.62      yamt 	 */
   1163          1.62      yamt 
   1164          1.84   thorpej 	return false;
   1165          1.62      yamt }
   1166          1.77      yamt 
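                                 /*
                                  * uvm_estimatepageable: estimate the number of active and inactive
                                  * pageable pages in the system, as tracked by the page replacement
                                  * policy.
                                  */
                                 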
   1167          1.77      yamt void
   1168  1.93.4.2.4.3      matt uvm_estimatepageable(u_int *active, u_int *inactive)
   1169          1.77      yamt {
   1170          1.77      yamt 
   1171          1.77      yamt 	uvmpdpol_estimatepageable(active, inactive);
   1172          1.77      yamt }
   1173