Home | History | Annotate | Line # | Download | only in uvm
uvm_pdaemon.c revision 1.93.4.2.4.12
      1  1.93.4.2.4.12      matt /*	$NetBSD: uvm_pdaemon.c,v 1.93.4.2.4.12 2012/04/14 00:49:35 matt Exp $	*/
      2            1.1       mrg 
      3           1.34       chs /*
      4            1.1       mrg  * Copyright (c) 1997 Charles D. Cranor and Washington University.
      5           1.34       chs  * Copyright (c) 1991, 1993, The Regents of the University of California.
      6            1.1       mrg  *
      7            1.1       mrg  * All rights reserved.
      8            1.1       mrg  *
      9            1.1       mrg  * This code is derived from software contributed to Berkeley by
     10            1.1       mrg  * The Mach Operating System project at Carnegie-Mellon University.
     11            1.1       mrg  *
     12            1.1       mrg  * Redistribution and use in source and binary forms, with or without
     13            1.1       mrg  * modification, are permitted provided that the following conditions
     14            1.1       mrg  * are met:
     15            1.1       mrg  * 1. Redistributions of source code must retain the above copyright
     16            1.1       mrg  *    notice, this list of conditions and the following disclaimer.
     17            1.1       mrg  * 2. Redistributions in binary form must reproduce the above copyright
     18            1.1       mrg  *    notice, this list of conditions and the following disclaimer in the
     19            1.1       mrg  *    documentation and/or other materials provided with the distribution.
     20            1.1       mrg  * 3. All advertising materials mentioning features or use of this software
     21            1.1       mrg  *    must display the following acknowledgement:
     22            1.1       mrg  *	This product includes software developed by Charles D. Cranor,
     23           1.34       chs  *      Washington University, the University of California, Berkeley and
     24            1.1       mrg  *      its contributors.
     25            1.1       mrg  * 4. Neither the name of the University nor the names of its contributors
     26            1.1       mrg  *    may be used to endorse or promote products derived from this software
     27            1.1       mrg  *    without specific prior written permission.
     28            1.1       mrg  *
     29            1.1       mrg  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     30            1.1       mrg  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     31            1.1       mrg  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     32            1.1       mrg  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     33            1.1       mrg  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     34            1.1       mrg  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     35            1.1       mrg  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     36            1.1       mrg  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     37            1.1       mrg  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     38            1.1       mrg  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     39            1.1       mrg  * SUCH DAMAGE.
     40            1.1       mrg  *
     41            1.1       mrg  *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
     42            1.4       mrg  * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
     43            1.1       mrg  *
     44            1.1       mrg  *
     45            1.1       mrg  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
     46            1.1       mrg  * All rights reserved.
     47           1.34       chs  *
     48            1.1       mrg  * Permission to use, copy, modify and distribute this software and
     49            1.1       mrg  * its documentation is hereby granted, provided that both the copyright
     50            1.1       mrg  * notice and this permission notice appear in all copies of the
     51            1.1       mrg  * software, derivative works or modified versions, and any portions
     52            1.1       mrg  * thereof, and that both notices appear in supporting documentation.
     53           1.34       chs  *
     54           1.34       chs  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     55           1.34       chs  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     56            1.1       mrg  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     57           1.34       chs  *
     58            1.1       mrg  * Carnegie Mellon requests users of this software to return to
     59            1.1       mrg  *
     60            1.1       mrg  *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
     61            1.1       mrg  *  School of Computer Science
     62            1.1       mrg  *  Carnegie Mellon University
     63            1.1       mrg  *  Pittsburgh PA 15213-3890
     64            1.1       mrg  *
     65            1.1       mrg  * any improvements or extensions that they make and grant Carnegie the
     66            1.1       mrg  * rights to redistribute these changes.
     67            1.1       mrg  */
     68            1.1       mrg 
     69            1.1       mrg /*
     70            1.1       mrg  * uvm_pdaemon.c: the page daemon
     71            1.1       mrg  */
     72           1.42     lukem 
     73           1.42     lukem #include <sys/cdefs.h>
     74  1.93.4.2.4.12      matt __KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.93.4.2.4.12 2012/04/14 00:49:35 matt Exp $");
     75           1.42     lukem 
     76           1.42     lukem #include "opt_uvmhist.h"
     77           1.69      yamt #include "opt_readahead.h"
     78            1.1       mrg 
     79            1.1       mrg #include <sys/param.h>
     80            1.1       mrg #include <sys/proc.h>
     81            1.1       mrg #include <sys/systm.h>
     82            1.1       mrg #include <sys/kernel.h>
     83            1.9        pk #include <sys/pool.h>
     84           1.24       chs #include <sys/buf.h>
     85       1.93.4.2       snj #include <sys/atomic.h>
     86            1.1       mrg 
     87            1.1       mrg #include <uvm/uvm.h>
     88           1.77      yamt #include <uvm/uvm_pdpolicy.h>
     89            1.1       mrg 
     90            1.1       mrg /*
     91           1.45       wiz  * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
     92           1.14       chs  * in a pass thru the inactive list when swap is full.  the value should be
     93           1.14       chs  * "small"... if it's too large we'll cycle the active pages thru the inactive
     94           1.14       chs  * queue too quickly to for them to be referenced and avoid being freed.
     95           1.14       chs  */
     96           1.14       chs 
     97           1.89        ad #define	UVMPD_NUMDIRTYREACTS	16
     98           1.14       chs 
     99           1.89        ad #define	UVMPD_NUMTRYLOCKOWNER	16
    100           1.14       chs 
    101           1.14       chs /*
    102            1.1       mrg  * local prototypes
    103            1.1       mrg  */
    104            1.1       mrg 
    105  1.93.4.2.4.10      matt static bool	uvmpd_scan(struct uvm_pggroup *);
    106   1.93.4.2.4.3      matt static void	uvmpd_scan_queue(struct uvm_pggroup *);
    107           1.65   thorpej static void	uvmpd_tune(void);
    108            1.1       mrg 
    109   1.93.4.2.4.4      matt static void	uvmpd_checkgroup(const struct uvm_pggroup *);
    110   1.93.4.2.4.4      matt 
    111   1.93.4.2.4.3      matt static struct uvm_pdinfo {
    112   1.93.4.2.4.3      matt 	unsigned int pd_waiters;
    113   1.93.4.2.4.3      matt 	unsigned int pd_scans_neededs;
    114   1.93.4.2.4.3      matt 	struct uvm_pggrouplist pd_pagingq;
    115   1.93.4.2.4.3      matt 	struct uvm_pggrouplist pd_pendingq;
    116  1.93.4.2.4.12      matt 	bool pd_stalled;
    117   1.93.4.2.4.3      matt } uvm_pdinfo =  {
    118   1.93.4.2.4.3      matt 	.pd_pagingq = TAILQ_HEAD_INITIALIZER(uvm_pdinfo.pd_pagingq),
    119   1.93.4.2.4.3      matt 	.pd_pendingq = TAILQ_HEAD_INITIALIZER(uvm_pdinfo.pd_pendingq),
    120   1.93.4.2.4.3      matt };
    121           1.89        ad 
    122            1.1       mrg /*
    123           1.61       chs  * XXX hack to avoid hangs when large processes fork.
    124           1.61       chs  */
    125       1.93.4.2       snj u_int uvm_extrapages;
    126           1.61       chs 
    127           1.61       chs /*
    128            1.1       mrg  * uvm_wait: wait (sleep) for the page daemon to free some pages
    129            1.1       mrg  *
    130            1.1       mrg  * => should be called with all locks released
    131            1.1       mrg  * => should _not_ be called by the page daemon (to avoid deadlock)
    132            1.1       mrg  */
    133            1.1       mrg 
    134           1.19   thorpej void
    135           1.65   thorpej uvm_wait(const char *wmsg)
    136            1.8       mrg {
    137            1.8       mrg 	int timo = 0;
    138           1.89        ad 
    139           1.89        ad 	mutex_spin_enter(&uvm_fpageqlock);
    140            1.1       mrg 
    141            1.8       mrg 	/*
    142            1.8       mrg 	 * check for page daemon going to sleep (waiting for itself)
    143            1.8       mrg 	 */
    144            1.1       mrg 
    145           1.86        ad 	if (curlwp == uvm.pagedaemon_lwp && uvmexp.paging == 0) {
    146            1.8       mrg 		/*
    147            1.8       mrg 		 * now we have a problem: the pagedaemon wants to go to
    148            1.8       mrg 		 * sleep until it frees more memory.   but how can it
    149            1.8       mrg 		 * free more memory if it is asleep?  that is a deadlock.
    150            1.8       mrg 		 * we have two options:
    151            1.8       mrg 		 *  [1] panic now
    152            1.8       mrg 		 *  [2] put a timeout on the sleep, thus causing the
    153            1.8       mrg 		 *      pagedaemon to only pause (rather than sleep forever)
    154            1.8       mrg 		 *
    155            1.8       mrg 		 * note that option [2] will only help us if we get lucky
    156            1.8       mrg 		 * and some other process on the system breaks the deadlock
    157            1.8       mrg 		 * by exiting or freeing memory (thus allowing the pagedaemon
    158            1.8       mrg 		 * to continue).  for now we panic if DEBUG is defined,
    159            1.8       mrg 		 * otherwise we hope for the best with option [2] (better
    160            1.8       mrg 		 * yet, this should never happen in the first place!).
    161            1.8       mrg 		 */
    162            1.1       mrg 
    163            1.8       mrg 		printf("pagedaemon: deadlock detected!\n");
    164            1.8       mrg 		timo = hz >> 3;		/* set timeout */
    165            1.1       mrg #if defined(DEBUG)
    166            1.8       mrg 		/* DEBUG: panic so we can debug it */
    167            1.8       mrg 		panic("pagedaemon deadlock");
    168            1.1       mrg #endif
    169            1.8       mrg 	}
    170            1.1       mrg 
    171   1.93.4.2.4.3      matt 	uvm_pdinfo.pd_waiters++;
    172  1.93.4.2.4.12      matt 	if (!uvm_pdinfo.pd_stalled)
    173  1.93.4.2.4.12      matt 		wakeup(&uvm.pagedaemon);		/* wake the daemon! */
    174           1.89        ad 	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm_fpageqlock, false, wmsg, timo);
    175   1.93.4.2.4.7      matt 	uvm_pdinfo.pd_waiters--;
    176            1.1       mrg }
    177            1.1       mrg 
    178   1.93.4.2.4.4      matt 
    179   1.93.4.2.4.4      matt static void
    180   1.93.4.2.4.4      matt uvmpd_checkgroup(const struct uvm_pggroup *grp)
    181   1.93.4.2.4.4      matt {
    182   1.93.4.2.4.4      matt #ifdef DEBUG
    183   1.93.4.2.4.4      matt 	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;
    184   1.93.4.2.4.4      matt 	bool in_pendingq = false;
    185   1.93.4.2.4.4      matt 	bool in_pagingq = false;
    186   1.93.4.2.4.4      matt 	const struct uvm_pggroup *tstgrp;
    187   1.93.4.2.4.4      matt 
    188   1.93.4.2.4.4      matt 	TAILQ_FOREACH(tstgrp, &pdinfo->pd_pendingq, pgrp_pending_link) {
    189   1.93.4.2.4.4      matt 		if (tstgrp == grp) {
    190   1.93.4.2.4.4      matt 			in_pendingq = true;
    191   1.93.4.2.4.4      matt 			break;
    192   1.93.4.2.4.4      matt 		}
    193   1.93.4.2.4.4      matt 	}
    194   1.93.4.2.4.4      matt 
    195   1.93.4.2.4.4      matt 	TAILQ_FOREACH(tstgrp, &pdinfo->pd_pagingq, pgrp_paging_link) {
    196   1.93.4.2.4.4      matt 		if (tstgrp == grp) {
    197   1.93.4.2.4.4      matt 			in_pagingq = true;
    198   1.93.4.2.4.4      matt 			break;
    199   1.93.4.2.4.4      matt 		}
    200   1.93.4.2.4.4      matt 	}
    201   1.93.4.2.4.4      matt 
    202   1.93.4.2.4.4      matt 	if (grp->pgrp_paging > 0) {
    203   1.93.4.2.4.4      matt 		KASSERT(in_pagingq);
    204   1.93.4.2.4.4      matt 		KASSERT(!in_pendingq);
    205   1.93.4.2.4.4      matt 	} else {
    206   1.93.4.2.4.4      matt 		KASSERT(!in_pagingq);
    207   1.93.4.2.4.4      matt 		KASSERT(in_pendingq == grp->pgrp_scan_needed);
    208   1.93.4.2.4.4      matt 	}
    209   1.93.4.2.4.4      matt #endif
    210   1.93.4.2.4.4      matt }
    211   1.93.4.2.4.4      matt 
    212           1.77      yamt /*
    213           1.77      yamt  * uvm_kick_pdaemon: perform checks to determine if we need to
    214           1.77      yamt  * give the pagedaemon a nudge, and do so if necessary.
    215           1.89        ad  *
    216           1.89        ad  * => called with uvm_fpageqlock held.
    217           1.77      yamt  */
    218           1.77      yamt 
    219           1.77      yamt void
    220           1.77      yamt uvm_kick_pdaemon(void)
    221           1.77      yamt {
    222   1.93.4.2.4.3      matt 	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;
    223   1.93.4.2.4.3      matt 	bool need_wakeup = false;
    224   1.93.4.2.4.3      matt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);
    225           1.77      yamt 
    226           1.89        ad 	KASSERT(mutex_owned(&uvm_fpageqlock));
    227           1.89        ad 
    228   1.93.4.2.4.3      matt 	struct uvm_pggroup *grp;
    229   1.93.4.2.4.3      matt 	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
    230   1.93.4.2.4.3      matt 		const bool prev_scan_needed = grp->pgrp_scan_needed;
    231   1.93.4.2.4.3      matt 
    232   1.93.4.2.4.3      matt 		KASSERT(grp->pgrp_npages > 0);
    233   1.93.4.2.4.4      matt 		uvmpd_checkgroup(grp);
    234   1.93.4.2.4.3      matt 
    235   1.93.4.2.4.3      matt 		grp->pgrp_scan_needed =
    236   1.93.4.2.4.3      matt 		    grp->pgrp_free + grp->pgrp_paging < grp->pgrp_freemin
    237   1.93.4.2.4.3      matt 		    || (grp->pgrp_free + grp->pgrp_paging < grp->pgrp_freetarg
    238   1.93.4.2.4.3      matt 			&& uvmpdpol_needsscan_p(grp));
    239   1.93.4.2.4.3      matt 
    240   1.93.4.2.4.3      matt 		if (prev_scan_needed != grp->pgrp_scan_needed) {
    241   1.93.4.2.4.3      matt 			UVMHIST_LOG(pdhist, " [%zd] %d->%d (scan=%d)",
    242   1.93.4.2.4.3      matt 			    grp - uvm.pggroups, prev_scan_needed,
    243   1.93.4.2.4.3      matt 			    grp->pgrp_scan_needed, uvmpdpol_needsscan_p(grp));
    244   1.93.4.2.4.3      matt 			UVMHIST_LOG(pdhist, " [%zd] %d < min(%d,%d)",
    245   1.93.4.2.4.3      matt 			    grp - uvm.pggroups,
    246   1.93.4.2.4.3      matt 			    grp->pgrp_free + grp->pgrp_paging,
    247   1.93.4.2.4.3      matt 			    grp->pgrp_freemin, grp->pgrp_freetarg);
    248   1.93.4.2.4.3      matt 		}
    249   1.93.4.2.4.3      matt 
    250   1.93.4.2.4.4      matt 		if (prev_scan_needed != grp->pgrp_scan_needed) {
    251   1.93.4.2.4.3      matt 			if (grp->pgrp_scan_needed) {
    252   1.93.4.2.4.9      matt 				struct uvm_pggroup *prev;
    253   1.93.4.2.4.9      matt 				TAILQ_FOREACH(prev, &pdinfo->pd_pendingq,
    254   1.93.4.2.4.9      matt 				    pgrp_pending_link) {
    255   1.93.4.2.4.9      matt 					if (grp->pgrp_free < prev->pgrp_free)
    256   1.93.4.2.4.9      matt 						break;
    257   1.93.4.2.4.9      matt 				}
    258   1.93.4.2.4.9      matt 				if (prev == NULL) {
    259   1.93.4.2.4.9      matt 					TAILQ_INSERT_TAIL(&pdinfo->pd_pendingq,
    260   1.93.4.2.4.9      matt 					    grp, pgrp_pending_link);
    261   1.93.4.2.4.9      matt 				} else {
    262   1.93.4.2.4.9      matt 					TAILQ_INSERT_BEFORE(prev, grp,
    263   1.93.4.2.4.9      matt 					    pgrp_pending_link);
    264   1.93.4.2.4.9      matt 				}
    265   1.93.4.2.4.3      matt 				need_wakeup = true;
    266   1.93.4.2.4.3      matt 			} else {
    267   1.93.4.2.4.3      matt 				TAILQ_REMOVE(&pdinfo->pd_pendingq,
    268   1.93.4.2.4.4      matt 				    grp, pgrp_pending_link);
    269   1.93.4.2.4.3      matt 			}
    270   1.93.4.2.4.4      matt 			uvmpd_checkgroup(grp);
    271   1.93.4.2.4.3      matt 		}
    272           1.77      yamt 	}
    273   1.93.4.2.4.3      matt 
    274  1.93.4.2.4.12      matt 	const bool stalled = pdinfo->pd_stalled;
    275  1.93.4.2.4.12      matt 	if (need_wakeup && !stalled)
    276   1.93.4.2.4.3      matt 		wakeup(&uvm.pagedaemon);
    277   1.93.4.2.4.3      matt 
    278  1.93.4.2.4.12      matt 	UVMHIST_LOG(pdhist, " <- done: wakeup=%d stalled=%d!",
    279  1.93.4.2.4.12      matt 	    need_wakeup, stalled, 0, 0);
    280           1.77      yamt }
    281            1.1       mrg 
    282            1.1       mrg /*
    283            1.1       mrg  * uvmpd_tune: tune paging parameters
    284            1.1       mrg  *
    285            1.1       mrg  * => called when ever memory is added (or removed?) to the system
    286            1.1       mrg  * => caller must call with page queues locked
    287            1.1       mrg  */
    288            1.1       mrg 
    289           1.65   thorpej static void
    290           1.37       chs uvmpd_tune(void)
    291            1.8       mrg {
    292   1.93.4.2.4.3      matt 	u_int extrapages = atomic_swap_uint(&uvm_extrapages, 0) / uvmexp.ncolors;
    293   1.93.4.2.4.3      matt 	u_int freemin = 0;
    294   1.93.4.2.4.3      matt 	u_int freetarg = 0;
    295   1.93.4.2.4.3      matt 	u_int wiredmax = 0;
    296       1.93.4.2       snj 
    297            1.8       mrg 	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);
    298            1.1       mrg 
    299   1.93.4.2.4.3      matt 	extrapages = roundup(extrapages, uvmexp.npggroups);
    300   1.93.4.2.4.3      matt 
    301   1.93.4.2.4.3      matt 	struct uvm_pggroup *grp;
    302   1.93.4.2.4.3      matt 	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
    303   1.93.4.2.4.3      matt 		KASSERT(grp->pgrp_npages > 0);
    304   1.93.4.2.4.3      matt 
    305   1.93.4.2.4.3      matt 		/*
    306   1.93.4.2.4.3      matt 		 * try to keep 0.5% of available RAM free, but limit
    307   1.93.4.2.4.3      matt 		 * to between 128k and 1024k per-CPU.
    308   1.93.4.2.4.3      matt 		 * XXX: what are these values good for?
    309   1.93.4.2.4.3      matt 		 */
    310   1.93.4.2.4.3      matt 		u_int val = grp->pgrp_npages / 200;
    311   1.93.4.2.4.3      matt 		val = MAX(val, (128*1024) >> PAGE_SHIFT);
    312   1.93.4.2.4.3      matt 		val = MIN(val, (1024*1024) >> PAGE_SHIFT);
    313   1.93.4.2.4.3      matt 		val *= ncpu;
    314   1.93.4.2.4.3      matt 
    315   1.93.4.2.4.3      matt 		/* Make sure there's always a user page free. */
    316   1.93.4.2.4.3      matt 		if (val * uvmexp.npggroups <= uvmexp.reserve_kernel)
    317   1.93.4.2.4.3      matt 			val = uvmexp.reserve_kernel / uvmexp.npggroups + 1;
    318   1.93.4.2.4.3      matt 
    319   1.93.4.2.4.3      matt 		grp->pgrp_freemin = val;
    320   1.93.4.2.4.3      matt 
    321   1.93.4.2.4.3      matt 		/* Calculate freetarg. */
    322   1.93.4.2.4.3      matt 		val = (grp->pgrp_freemin * 4) / 3;
    323   1.93.4.2.4.3      matt 		if (val <= grp->pgrp_freemin)
    324   1.93.4.2.4.3      matt 			val = grp->pgrp_freemin + 1;
    325   1.93.4.2.4.8      matt #ifdef VM_FREELIST_NORMALOK_P
    326   1.93.4.2.4.8      matt 		if (!VM_FREELIST_NORMALOK_P(grp->pgrp_free_list))
    327   1.93.4.2.4.8      matt 			val *= 4;
    328   1.93.4.2.4.8      matt #endif
    329   1.93.4.2.4.3      matt 		grp->pgrp_freetarg = val + extrapages / uvmexp.npggroups;
    330   1.93.4.2.4.3      matt 		if (grp->pgrp_freetarg > grp->pgrp_npages / 2)
    331   1.93.4.2.4.3      matt 			grp->pgrp_freetarg = grp->pgrp_npages / 2;
    332   1.93.4.2.4.3      matt 
    333   1.93.4.2.4.3      matt 		grp->pgrp_wiredmax = grp->pgrp_npages / 3;
    334   1.93.4.2.4.3      matt 		UVMHIST_LOG(pdhist,
    335   1.93.4.2.4.3      matt 		    "[%zd]: freemin=%d, freetarg=%d, wiredmax=%d",
    336   1.93.4.2.4.3      matt 		    grp - uvm.pggroups, grp->pgrp_freemin, grp->pgrp_freetarg,
    337   1.93.4.2.4.3      matt 		    grp->pgrp_wiredmax);
    338   1.93.4.2.4.3      matt 
    339   1.93.4.2.4.3      matt 		freemin += grp->pgrp_freemin;
    340   1.93.4.2.4.3      matt 		freetarg += grp->pgrp_freetarg;
    341   1.93.4.2.4.3      matt 		wiredmax += grp->pgrp_wiredmax;
    342   1.93.4.2.4.3      matt 	}
    343   1.93.4.2.4.3      matt 
    344   1.93.4.2.4.3      matt 	uvmexp.freemin = freemin;
    345   1.93.4.2.4.3      matt 	uvmexp.freetarg = freetarg;
    346   1.93.4.2.4.3      matt 	uvmexp.wiredmax = wiredmax;
    347           1.61       chs 
    348            1.8       mrg 	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
    349   1.93.4.2.4.3      matt 	    uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
    350            1.1       mrg }
    351            1.1       mrg 
    352            1.1       mrg /*
    353            1.1       mrg  * uvm_pageout: the main loop for the pagedaemon
    354            1.1       mrg  */
    355            1.1       mrg 
    356            1.8       mrg void
    357           1.80      yamt uvm_pageout(void *arg)
    358            1.8       mrg {
    359   1.93.4.2.4.3      matt 	u_int npages = 0;
    360   1.93.4.2.4.3      matt 	u_int extrapages = 0;
    361   1.93.4.2.4.3      matt 	u_int npggroups = 0;
    362           1.88        ad 	struct pool *pp;
    363           1.88        ad 	uint64_t where;
    364   1.93.4.2.4.3      matt 	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;
    365  1.93.4.2.4.10      matt 	bool progress = true;
    366            1.8       mrg 	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);
    367           1.24       chs 
    368            1.8       mrg 	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);
    369            1.8       mrg 
    370            1.8       mrg 	/*
    371            1.8       mrg 	 * ensure correct priority and set paging parameters...
    372            1.8       mrg 	 */
    373            1.8       mrg 
    374           1.86        ad 	uvm.pagedaemon_lwp = curlwp;
    375           1.89        ad 	mutex_enter(&uvm_pageqlock);
    376            1.8       mrg 	npages = uvmexp.npages;
    377            1.8       mrg 	uvmpd_tune();
    378           1.89        ad 	mutex_exit(&uvm_pageqlock);
    379            1.8       mrg 
    380            1.8       mrg 	/*
    381            1.8       mrg 	 * main loop
    382            1.8       mrg 	 */
    383           1.24       chs 
    384           1.24       chs 	for (;;) {
    385   1.93.4.2.4.3      matt 		struct uvm_pggroup *grp;
    386   1.93.4.2.4.3      matt 		bool need_free = false;
    387   1.93.4.2.4.3      matt 		u_int bufcnt = 0;
    388           1.24       chs 
    389           1.89        ad 		mutex_spin_enter(&uvm_fpageqlock);
    390   1.93.4.2.4.3      matt 		/*
    391   1.93.4.2.4.3      matt 		 * If we have no one waiting or all color requests have
    392   1.93.4.2.4.3      matt 		 * active paging, then wait.
    393   1.93.4.2.4.3      matt 		 */
    394  1.93.4.2.4.10      matt 		if (progress == false
    395  1.93.4.2.4.10      matt 		    || (pdinfo->pd_waiters == 0
    396  1.93.4.2.4.10      matt 		        && TAILQ_FIRST(&pdinfo->pd_pendingq) == NULL)) {
    397           1.89        ad 			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
    398  1.93.4.2.4.12      matt 			pdinfo->pd_stalled = !progress
    399  1.93.4.2.4.12      matt 			    && pdinfo->pd_waiters > 0;
    400  1.93.4.2.4.12      matt 			int timo = (pdinfo->pd_stalled ? 2 * hz : 0);
    401           1.89        ad 			UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
    402  1.93.4.2.4.10      matt 			    &uvm_fpageqlock, false, "pgdaemon", timo);
    403           1.89        ad 			uvmexp.pdwoke++;
    404           1.89        ad 			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);
    405  1.93.4.2.4.12      matt 			pdinfo->pd_stalled = false;
    406  1.93.4.2.4.10      matt 			progress = false;
    407   1.93.4.2.4.7      matt 		} else if (TAILQ_FIRST(&pdinfo->pd_pendingq) == NULL) {
    408   1.93.4.2.4.7      matt 			/*
    409   1.93.4.2.4.7      matt 			 * Someone is waiting but no group are pending.
    410   1.93.4.2.4.7      matt 			 * Let's kick ourselves to find groups that need work.
    411   1.93.4.2.4.7      matt 			 */
    412   1.93.4.2.4.7      matt 			uvm_kick_pdaemon();
    413   1.93.4.2.4.7      matt 			mutex_spin_exit(&uvm_fpageqlock);
    414           1.89        ad 		} else {
    415           1.89        ad 			mutex_spin_exit(&uvm_fpageqlock);
    416           1.89        ad 		}
    417           1.24       chs 
    418            1.8       mrg 		/*
    419           1.24       chs 		 * now lock page queues and recompute inactive count
    420            1.8       mrg 		 */
    421            1.8       mrg 
    422           1.89        ad 		mutex_enter(&uvm_pageqlock);
    423   1.93.4.2.4.3      matt 		mutex_spin_enter(&uvm_fpageqlock);
    424   1.93.4.2.4.3      matt 
    425   1.93.4.2.4.3      matt 		if (npages != uvmexp.npages
    426   1.93.4.2.4.3      matt 		    || extrapages != uvm_extrapages
    427   1.93.4.2.4.3      matt 		    || npggroups != uvmexp.npggroups) {
    428           1.24       chs 			npages = uvmexp.npages;
    429           1.61       chs 			extrapages = uvm_extrapages;
    430   1.93.4.2.4.3      matt 			npggroups = uvmexp.npggroups;
    431           1.24       chs 			uvmpd_tune();
    432           1.24       chs 		}
    433           1.24       chs 
    434           1.60     enami 		/*
    435           1.60     enami 		 * Estimate a hint.  Note that bufmem are returned to
    436           1.60     enami 		 * system only when entire pool page is empty.
    437           1.60     enami 		 */
    438   1.93.4.2.4.3      matt 		bool need_wakeup = false;
    439   1.93.4.2.4.3      matt 		while ((grp = TAILQ_FIRST(&pdinfo->pd_pendingq)) != NULL) {
    440   1.93.4.2.4.3      matt 			KASSERT(grp->pgrp_npages > 0);
    441           1.60     enami 
    442   1.93.4.2.4.3      matt 			uvmpdpol_tune(grp);
    443            1.8       mrg 
    444   1.93.4.2.4.4      matt 			/*
    445   1.93.4.2.4.4      matt 			 * While we are locked, remove this from the pendingq.
    446   1.93.4.2.4.4      matt 			 */
    447   1.93.4.2.4.4      matt 			uvmpd_checkgroup(grp);
    448   1.93.4.2.4.4      matt 			KASSERT(grp->pgrp_scan_needed);
    449   1.93.4.2.4.4      matt 			TAILQ_REMOVE(&pdinfo->pd_pendingq, grp,
    450   1.93.4.2.4.4      matt 			    pgrp_pending_link);
    451   1.93.4.2.4.4      matt 			grp->pgrp_scan_needed = false;
    452   1.93.4.2.4.4      matt 			uvmpd_checkgroup(grp);
    453   1.93.4.2.4.4      matt 
    454   1.93.4.2.4.3      matt 			int diff = grp->pgrp_freetarg - grp->pgrp_free;
    455   1.93.4.2.4.3      matt 			if (diff < 0)
    456   1.93.4.2.4.3      matt 				diff = 0;
    457           1.89        ad 
    458   1.93.4.2.4.3      matt 			bufcnt += diff;
    459            1.8       mrg 
    460   1.93.4.2.4.3      matt 			UVMHIST_LOG(pdhist," [%zu]: "
    461   1.93.4.2.4.3      matt 			    "free/ftarg/fmin=%u/%u/%u",
    462   1.93.4.2.4.3      matt 			    grp - uvm.pggroups, grp->pgrp_free,
    463   1.93.4.2.4.3      matt 			    grp->pgrp_freetarg, grp->pgrp_freemin);
    464   1.93.4.2.4.3      matt 
    465   1.93.4.2.4.3      matt 
    466   1.93.4.2.4.3      matt 			if (grp->pgrp_paging < diff)
    467   1.93.4.2.4.3      matt 				need_free = true;
    468   1.93.4.2.4.3      matt 
    469   1.93.4.2.4.3      matt 			/*
    470   1.93.4.2.4.3      matt 			 * scan if needed
    471   1.93.4.2.4.3      matt 			 */
    472  1.93.4.2.4.12      matt 			bool local_progress = false;
    473   1.93.4.2.4.3      matt 			if (grp->pgrp_paging < diff
    474   1.93.4.2.4.3      matt 			    || uvmpdpol_needsscan_p(grp)) {
    475   1.93.4.2.4.3      matt 				mutex_spin_exit(&uvm_fpageqlock);
    476  1.93.4.2.4.12      matt 				if (uvmpd_scan(grp)) {
    477  1.93.4.2.4.10      matt 					progress = true;
    478  1.93.4.2.4.12      matt 					local_progress = true;
    479  1.93.4.2.4.12      matt 				}
    480   1.93.4.2.4.3      matt 				mutex_spin_enter(&uvm_fpageqlock);
    481   1.93.4.2.4.3      matt 			} else {
    482   1.93.4.2.4.3      matt 				UVMHIST_LOG(pdhist,
    483   1.93.4.2.4.3      matt 				    " [%zu]: diff/paging=%u/%u: "
    484   1.93.4.2.4.3      matt 				    "scan skipped",
    485   1.93.4.2.4.3      matt 				    grp - uvm.pggroups, diff,
    486   1.93.4.2.4.3      matt 				    grp->pgrp_paging, 0);
    487   1.93.4.2.4.3      matt 			}
    488   1.93.4.2.4.3      matt 
    489   1.93.4.2.4.3      matt 			/*
    490  1.93.4.2.4.12      matt 			 * if there's any free memory to be had for this group,
    491  1.93.4.2.4.12      matt 			 * wake up any waiters but only if we made progress for
    492  1.93.4.2.4.12      matt 			 * this group.
    493   1.93.4.2.4.3      matt 			 */
    494   1.93.4.2.4.3      matt 			if (grp->pgrp_free * uvmexp.npggroups > uvmexp.reserve_kernel
    495  1.93.4.2.4.12      matt 			    || (local_progress && grp->pgrp_paging == 0)) {
    496   1.93.4.2.4.3      matt 				need_wakeup = true;
    497   1.93.4.2.4.3      matt 			}
    498   1.93.4.2.4.3      matt 
    499   1.93.4.2.4.3      matt 		}
    500   1.93.4.2.4.3      matt 		if (need_wakeup) {
    501           1.24       chs 			wakeup(&uvmexp.free);
    502            1.8       mrg 		}
    503  1.93.4.2.4.12      matt 		KASSERT(!need_free || need_wakeup);
    504           1.89        ad 		mutex_spin_exit(&uvm_fpageqlock);
    505            1.1       mrg 
    506            1.8       mrg 		/*
    507   1.93.4.2.4.3      matt 		 * scan done.  unlock page queues (the only lock
    508   1.93.4.2.4.3      matt 		 * we are holding)
    509            1.8       mrg 		 */
    510           1.89        ad 		mutex_exit(&uvm_pageqlock);
    511           1.38       chs 
    512           1.88        ad 		/*
    513           1.93        ad 		 * if we don't need free memory, we're done.
    514           1.93        ad 		 */
    515           1.93        ad 
    516   1.93.4.2.4.3      matt 		if (!need_free)
    517           1.93        ad 			continue;
    518           1.93        ad 
    519           1.93        ad 		/*
    520           1.88        ad 		 * start draining pool resources now that we're not
    521           1.88        ad 		 * holding any locks.
    522           1.88        ad 		 */
    523           1.88        ad 		pool_drain_start(&pp, &where);
    524           1.60     enami 
    525           1.38       chs 		/*
    526           1.88        ad 		 * kill unused metadata buffers.
    527           1.38       chs 		 */
    528   1.93.4.2.4.3      matt 		if (bufcnt > 0) {
    529   1.93.4.2.4.3      matt 			mutex_enter(&bufcache_lock);
    530   1.93.4.2.4.3      matt 			buf_drain(bufcnt << PAGE_SHIFT);
    531   1.93.4.2.4.3      matt 			mutex_exit(&bufcache_lock);
    532   1.93.4.2.4.3      matt 		}
    533           1.57  jdolecek 
    534           1.57  jdolecek 		/*
    535           1.88        ad 		 * complete draining the pools.
    536           1.88        ad 		 */
    537           1.88        ad 		pool_drain_end(pp, where);
    538           1.24       chs 	}
    539           1.24       chs 	/*NOTREACHED*/
    540           1.24       chs }
    541           1.24       chs 
    542            1.8       mrg 
    543           1.24       chs /*
    544           1.81      yamt  * uvm_aiodone_worker: a workqueue callback for the aiodone daemon.
    545           1.24       chs  */
    546            1.8       mrg 
    547           1.24       chs void
    548           1.81      yamt uvm_aiodone_worker(struct work *wk, void *dummy)
    549           1.24       chs {
    550           1.81      yamt 	struct buf *bp = (void *)wk;
    551            1.9        pk 
    552           1.81      yamt 	KASSERT(&bp->b_work == wk);
    553            1.8       mrg 
    554           1.81      yamt 	/*
    555           1.81      yamt 	 * process an i/o that's done.
    556           1.81      yamt 	 */
    557            1.8       mrg 
    558           1.81      yamt 	(*bp->b_iodone)(bp);
    559           1.89        ad }
    560           1.89        ad 
    561           1.89        ad void
    562   1.93.4.2.4.3      matt uvm_pageout_start(struct uvm_pggroup *grp, u_int npages)
    563           1.89        ad {
    564   1.93.4.2.4.3      matt 	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;
    565           1.89        ad 
    566           1.89        ad 	mutex_spin_enter(&uvm_fpageqlock);
    567   1.93.4.2.4.3      matt 
    568   1.93.4.2.4.4      matt 	uvmpd_checkgroup(grp);
    569   1.93.4.2.4.5      matt 	uvmexp.paging += npages;
    570   1.93.4.2.4.3      matt 	if (grp->pgrp_paging == 0) {
    571   1.93.4.2.4.9      matt 		/*
    572   1.93.4.2.4.9      matt 		 * If the group is in a paging queue, it can't be in a pending
    573   1.93.4.2.4.9      matt 		 * queue so remove it if it is.
    574   1.93.4.2.4.9      matt 		 */
    575   1.93.4.2.4.9      matt 		if (grp->pgrp_scan_needed) {
    576   1.93.4.2.4.9      matt 			TAILQ_REMOVE(&pdinfo->pd_pendingq, grp,
    577   1.93.4.2.4.9      matt 			    pgrp_pending_link);
    578   1.93.4.2.4.9      matt 			grp->pgrp_scan_needed = false;
    579   1.93.4.2.4.9      matt 		}
    580   1.93.4.2.4.4      matt 		TAILQ_INSERT_TAIL(&pdinfo->pd_pagingq, grp, pgrp_paging_link);
    581   1.93.4.2.4.3      matt 	}
    582   1.93.4.2.4.3      matt 	grp->pgrp_paging += npages;
    583   1.93.4.2.4.5      matt 	uvmpd_checkgroup(grp);
    584           1.89        ad 	mutex_spin_exit(&uvm_fpageqlock);
    585           1.89        ad }
    586           1.89        ad 
    587           1.89        ad void
    588   1.93.4.2.4.3      matt uvm_pageout_done(struct vm_page *pg, bool freed)
    589           1.89        ad {
    590   1.93.4.2.4.3      matt 	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;
    591   1.93.4.2.4.3      matt 
    592   1.93.4.2.4.3      matt 	KASSERT(pg->flags & PG_PAGEOUT);
    593           1.89        ad 
    594           1.89        ad 	mutex_spin_enter(&uvm_fpageqlock);
    595   1.93.4.2.4.3      matt 	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
    596   1.93.4.2.4.3      matt 
    597   1.93.4.2.4.3      matt 	KASSERT(grp->pgrp_paging > 0);
    598   1.93.4.2.4.4      matt 	uvmpd_checkgroup(grp);
    599   1.93.4.2.4.3      matt 	if (--grp->pgrp_paging == 0) {
    600   1.93.4.2.4.4      matt 		TAILQ_REMOVE(&pdinfo->pd_pagingq, grp, pgrp_paging_link);
    601   1.93.4.2.4.4      matt 		uvmpd_checkgroup(grp);
    602   1.93.4.2.4.3      matt 	}
    603   1.93.4.2.4.3      matt 	KASSERT(uvmexp.paging > 0);
    604   1.93.4.2.4.3      matt 	uvmexp.paging--;
    605   1.93.4.2.4.3      matt 	grp->pgrp_pdfreed += freed;
    606           1.89        ad 
    607           1.89        ad 	/*
    608   1.93.4.2.4.5      matt 	 * Page is no longer being paged out.
    609   1.93.4.2.4.5      matt 	 */
    610   1.93.4.2.4.5      matt 	pg->flags &= ~PG_PAGEOUT;
    611   1.93.4.2.4.5      matt 
    612   1.93.4.2.4.5      matt 	/*
    613           1.89        ad 	 * wake up either of pagedaemon or LWPs waiting for it.
    614           1.89        ad 	 */
    615   1.93.4.2.4.3      matt 	if (grp->pgrp_free * uvmexp.npggroups <= uvmexp.reserve_kernel) {
    616           1.81      yamt 		wakeup(&uvm.pagedaemon);
    617           1.81      yamt 	} else {
    618           1.81      yamt 		wakeup(&uvmexp.free);
    619            1.8       mrg 	}
    620   1.93.4.2.4.3      matt 
    621           1.89        ad 	mutex_spin_exit(&uvm_fpageqlock);
    622            1.1       mrg }
    623            1.1       mrg 
    624           1.76      yamt /*
    625           1.76      yamt  * uvmpd_trylockowner: trylock the page's owner.
    626           1.76      yamt  *
    627           1.76      yamt  * => called with pageq locked.
    628           1.76      yamt  * => resolve orphaned O->A loaned page.
    629           1.89        ad  * => return the locked mutex on success.  otherwise, return NULL.
    630           1.76      yamt  */
    631           1.76      yamt 
    632           1.89        ad kmutex_t *
    633           1.76      yamt uvmpd_trylockowner(struct vm_page *pg)
    634           1.76      yamt {
    635           1.76      yamt 	struct uvm_object *uobj = pg->uobject;
    636           1.89        ad 	kmutex_t *slock;
    637           1.89        ad 
    638           1.89        ad 	KASSERT(mutex_owned(&uvm_pageqlock));
    639           1.76      yamt 
    640           1.76      yamt 	if (uobj != NULL) {
    641           1.76      yamt 		slock = &uobj->vmobjlock;
    642           1.76      yamt 	} else {
    643           1.76      yamt 		struct vm_anon *anon = pg->uanon;
    644           1.76      yamt 
    645           1.76      yamt 		KASSERT(anon != NULL);
    646           1.76      yamt 		slock = &anon->an_lock;
    647           1.76      yamt 	}
    648           1.76      yamt 
    649           1.89        ad 	if (!mutex_tryenter(slock)) {
    650           1.76      yamt 		return NULL;
    651           1.76      yamt 	}
    652           1.76      yamt 
    653           1.76      yamt 	if (uobj == NULL) {
    654           1.76      yamt 
    655           1.76      yamt 		/*
    656           1.76      yamt 		 * set PQ_ANON if it isn't set already.
    657           1.76      yamt 		 */
    658           1.76      yamt 
    659           1.76      yamt 		if ((pg->pqflags & PQ_ANON) == 0) {
    660           1.76      yamt 			KASSERT(pg->loan_count > 0);
    661           1.76      yamt 			pg->loan_count--;
    662           1.76      yamt 			pg->pqflags |= PQ_ANON;
    663           1.76      yamt 			/* anon now owns it */
    664           1.76      yamt 		}
    665           1.76      yamt 	}
    666           1.76      yamt 
    667           1.76      yamt 	return slock;
    668           1.76      yamt }
    669           1.76      yamt 
    670           1.73      yamt #if defined(VMSWAP)
    671           1.73      yamt struct swapcluster {
    672           1.73      yamt 	int swc_slot;
    673           1.73      yamt 	int swc_nallocated;
    674           1.73      yamt 	int swc_nused;
    675           1.75      yamt 	struct vm_page *swc_pages[howmany(MAXPHYS, MIN_PAGE_SIZE)];
    676           1.73      yamt };
    677           1.73      yamt 
    678           1.73      yamt static void
    679           1.73      yamt swapcluster_init(struct swapcluster *swc)
    680           1.73      yamt {
    681           1.73      yamt 
    682           1.73      yamt 	swc->swc_slot = 0;
    683           1.89        ad 	swc->swc_nused = 0;
    684           1.73      yamt }
    685           1.73      yamt 
    686           1.73      yamt static int
    687           1.73      yamt swapcluster_allocslots(struct swapcluster *swc)
    688           1.73      yamt {
    689           1.73      yamt 	int slot;
    690           1.73      yamt 	int npages;
    691           1.73      yamt 
    692           1.73      yamt 	if (swc->swc_slot != 0) {
    693           1.73      yamt 		return 0;
    694           1.73      yamt 	}
    695           1.73      yamt 
    696           1.73      yamt 	/* Even with strange MAXPHYS, the shift
    697           1.73      yamt 	   implicitly rounds down to a page. */
    698           1.73      yamt 	npages = MAXPHYS >> PAGE_SHIFT;
    699           1.84   thorpej 	slot = uvm_swap_alloc(&npages, true);
    700           1.73      yamt 	if (slot == 0) {
    701           1.73      yamt 		return ENOMEM;
    702           1.73      yamt 	}
    703           1.73      yamt 	swc->swc_slot = slot;
    704           1.73      yamt 	swc->swc_nallocated = npages;
    705           1.73      yamt 	swc->swc_nused = 0;
    706           1.73      yamt 
    707           1.73      yamt 	return 0;
    708           1.73      yamt }
    709           1.73      yamt 
    710           1.73      yamt static int
    711           1.73      yamt swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
    712           1.73      yamt {
    713           1.73      yamt 	int slot;
    714           1.73      yamt 	struct uvm_object *uobj;
    715           1.73      yamt 
    716           1.73      yamt 	KASSERT(swc->swc_slot != 0);
    717           1.73      yamt 	KASSERT(swc->swc_nused < swc->swc_nallocated);
    718           1.73      yamt 	KASSERT((pg->pqflags & PQ_SWAPBACKED) != 0);
    719           1.73      yamt 
    720           1.73      yamt 	slot = swc->swc_slot + swc->swc_nused;
    721           1.73      yamt 	uobj = pg->uobject;
    722           1.73      yamt 	if (uobj == NULL) {
    723           1.89        ad 		KASSERT(mutex_owned(&pg->uanon->an_lock));
    724           1.73      yamt 		pg->uanon->an_swslot = slot;
    725           1.73      yamt 	} else {
    726           1.73      yamt 		int result;
    727           1.73      yamt 
    728           1.89        ad 		KASSERT(mutex_owned(&uobj->vmobjlock));
    729           1.73      yamt 		result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
    730           1.73      yamt 		if (result == -1) {
    731           1.73      yamt 			return ENOMEM;
    732           1.73      yamt 		}
    733           1.73      yamt 	}
    734           1.73      yamt 	swc->swc_pages[swc->swc_nused] = pg;
    735           1.73      yamt 	swc->swc_nused++;
    736           1.73      yamt 
    737           1.73      yamt 	return 0;
    738           1.73      yamt }
    739           1.73      yamt 
    740           1.73      yamt static void
    741   1.93.4.2.4.3      matt swapcluster_flush(struct uvm_pggroup *grp, struct swapcluster *swc, bool now)
    742           1.73      yamt {
    743           1.73      yamt 	int slot;
    744   1.93.4.2.4.3      matt 	u_int nused;
    745           1.73      yamt 	int nallocated;
    746           1.73      yamt 	int error;
    747           1.73      yamt 
    748           1.73      yamt 	if (swc->swc_slot == 0) {
    749           1.73      yamt 		return;
    750           1.73      yamt 	}
    751           1.73      yamt 	KASSERT(swc->swc_nused <= swc->swc_nallocated);
    752           1.73      yamt 
    753           1.73      yamt 	slot = swc->swc_slot;
    754           1.73      yamt 	nused = swc->swc_nused;
    755           1.73      yamt 	nallocated = swc->swc_nallocated;
    756           1.73      yamt 
    757           1.73      yamt 	/*
    758           1.73      yamt 	 * if this is the final pageout we could have a few
    759           1.73      yamt 	 * unused swap blocks.  if so, free them now.
    760           1.73      yamt 	 */
    761           1.73      yamt 
    762           1.73      yamt 	if (nused < nallocated) {
    763           1.73      yamt 		if (!now) {
    764           1.73      yamt 			return;
    765           1.73      yamt 		}
    766           1.73      yamt 		uvm_swap_free(slot + nused, nallocated - nused);
    767           1.73      yamt 	}
    768           1.73      yamt 
    769           1.73      yamt 	/*
    770           1.73      yamt 	 * now start the pageout.
    771           1.73      yamt 	 */
    772           1.73      yamt 
    773           1.91      yamt 	if (nused > 0) {
    774   1.93.4.2.4.3      matt 		grp->pgrp_pdpageouts++;
    775   1.93.4.2.4.3      matt 		uvmexp.pdpageouts++;	/* procfs */
    776   1.93.4.2.4.3      matt 		uvm_pageout_start(grp, nused);
    777           1.91      yamt 		error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
    778           1.92      yamt 		KASSERT(error == 0 || error == ENOMEM);
    779           1.91      yamt 	}
    780           1.73      yamt 
    781           1.73      yamt 	/*
    782           1.73      yamt 	 * zero swslot to indicate that we are
    783           1.73      yamt 	 * no longer building a swap-backed cluster.
    784           1.73      yamt 	 */
    785           1.73      yamt 
    786           1.73      yamt 	swc->swc_slot = 0;
    787           1.89        ad 	swc->swc_nused = 0;
    788           1.89        ad }
    789           1.89        ad 
    790           1.89        ad static int
    791           1.89        ad swapcluster_nused(struct swapcluster *swc)
    792           1.89        ad {
    793           1.89        ad 
    794           1.89        ad 	return swc->swc_nused;
    795           1.73      yamt }
    796           1.77      yamt 
    797           1.77      yamt /*
    798           1.77      yamt  * uvmpd_dropswap: free any swap allocated to this page.
    799           1.77      yamt  *
    800           1.77      yamt  * => called with owner locked.
    801           1.84   thorpej  * => return true if a page had an associated slot.
    802           1.77      yamt  */
    803           1.77      yamt 
    804           1.83   thorpej static bool
    805           1.77      yamt uvmpd_dropswap(struct vm_page *pg)
    806           1.77      yamt {
    807           1.84   thorpej 	bool result = false;
    808           1.77      yamt 	struct vm_anon *anon = pg->uanon;
    809           1.77      yamt 
    810           1.77      yamt 	if ((pg->pqflags & PQ_ANON) && anon->an_swslot) {
    811           1.77      yamt 		uvm_swap_free(anon->an_swslot, 1);
    812           1.77      yamt 		anon->an_swslot = 0;
    813           1.77      yamt 		pg->flags &= ~PG_CLEAN;
    814           1.84   thorpej 		result = true;
    815           1.77      yamt 	} else if (pg->pqflags & PQ_AOBJ) {
    816           1.77      yamt 		int slot = uao_set_swslot(pg->uobject,
    817           1.77      yamt 		    pg->offset >> PAGE_SHIFT, 0);
    818           1.77      yamt 		if (slot) {
    819           1.77      yamt 			uvm_swap_free(slot, 1);
    820           1.77      yamt 			pg->flags &= ~PG_CLEAN;
    821           1.84   thorpej 			result = true;
    822           1.77      yamt 		}
    823           1.77      yamt 	}
    824           1.77      yamt 
    825           1.77      yamt 	return result;
    826           1.77      yamt }
    827           1.77      yamt 
    828           1.77      yamt /*
    829           1.77      yamt  * uvmpd_trydropswap: try to free any swap allocated to this page.
    830           1.77      yamt  *
    831           1.84   thorpej  * => return true if a slot is successfully freed.
    832           1.77      yamt  */
    833           1.77      yamt 
    834           1.83   thorpej bool
    835           1.77      yamt uvmpd_trydropswap(struct vm_page *pg)
    836           1.77      yamt {
    837           1.89        ad 	kmutex_t *slock;
    838           1.83   thorpej 	bool result;
    839           1.77      yamt 
    840           1.77      yamt 	if ((pg->flags & PG_BUSY) != 0) {
    841           1.84   thorpej 		return false;
    842           1.77      yamt 	}
    843           1.77      yamt 
    844           1.77      yamt 	/*
    845           1.77      yamt 	 * lock the page's owner.
    846           1.77      yamt 	 */
    847           1.77      yamt 
    848           1.77      yamt 	slock = uvmpd_trylockowner(pg);
    849           1.77      yamt 	if (slock == NULL) {
    850           1.84   thorpej 		return false;
    851           1.77      yamt 	}
    852           1.77      yamt 
    853           1.77      yamt 	/*
    854           1.77      yamt 	 * skip this page if it's busy.
    855           1.77      yamt 	 */
    856           1.77      yamt 
    857           1.77      yamt 	if ((pg->flags & PG_BUSY) != 0) {
    858           1.89        ad 		mutex_exit(slock);
    859           1.84   thorpej 		return false;
    860           1.77      yamt 	}
    861           1.77      yamt 
    862           1.77      yamt 	result = uvmpd_dropswap(pg);
    863           1.77      yamt 
    864           1.89        ad 	mutex_exit(slock);
    865           1.77      yamt 
    866           1.77      yamt 	return result;
    867           1.77      yamt }
    868           1.77      yamt 
    869           1.73      yamt #endif /* defined(VMSWAP) */
    870           1.73      yamt 
    871            1.1       mrg /*
    872           1.77      yamt  * uvmpd_scan_queue: scan an replace candidate list for pages
    873           1.77      yamt  * to clean or free.
    874            1.1       mrg  *
    875            1.1       mrg  * => called with page queues locked
    876            1.1       mrg  * => we work on meeting our free target by converting inactive pages
    877            1.1       mrg  *    into free pages.
    878            1.1       mrg  * => we handle the building of swap-backed clusters
    879            1.1       mrg  */
    880            1.1       mrg 
    881           1.65   thorpej static void
    882   1.93.4.2.4.3      matt uvmpd_scan_queue(struct uvm_pggroup *grp)
    883            1.8       mrg {
    884   1.93.4.2.4.3      matt 	struct vm_page *pg;
    885            1.8       mrg 	struct uvm_object *uobj;
    886           1.37       chs 	struct vm_anon *anon;
    887           1.68      yamt #if defined(VMSWAP)
    888           1.73      yamt 	struct swapcluster swc;
    889           1.68      yamt #endif /* defined(VMSWAP) */
    890   1.93.4.2.4.6      matt 	u_int dirtyreacts;
    891   1.93.4.2.4.6      matt 	u_int lockownerfail;
    892   1.93.4.2.4.6      matt 	u_int victims;
    893   1.93.4.2.4.6      matt 	u_int freed;
    894   1.93.4.2.4.6      matt 	u_int busy;
    895           1.89        ad 	kmutex_t *slock;
    896           1.77      yamt 	UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);
    897            1.1       mrg 
    898            1.8       mrg 	/*
    899            1.8       mrg 	 * swslot is non-zero if we are building a swap cluster.  we want
    900           1.24       chs 	 * to stay in the loop while we have a page to scan or we have
    901            1.8       mrg 	 * a swap-cluster to build.
    902            1.8       mrg 	 */
    903           1.24       chs 
    904           1.73      yamt #if defined(VMSWAP)
    905           1.73      yamt 	swapcluster_init(&swc);
    906           1.73      yamt #endif /* defined(VMSWAP) */
    907           1.77      yamt 
    908           1.14       chs 	dirtyreacts = 0;
    909           1.89        ad 	lockownerfail = 0;
    910   1.93.4.2.4.6      matt 	victims = 0;
    911   1.93.4.2.4.6      matt 	freed = 0;
    912   1.93.4.2.4.6      matt 	busy = 0;
    913   1.93.4.2.4.3      matt 	uvmpdpol_scaninit(grp);
    914           1.43       chs 
    915   1.93.4.2.4.6      matt 	UVMHIST_LOG(pdhist,"  [%zd]: want free target (%u)",
    916   1.93.4.2.4.8      matt 	    grp - uvm.pggroups, grp->pgrp_freetarg << 2, 0, 0);
    917           1.77      yamt 	while (/* CONSTCOND */ 1) {
    918           1.24       chs 
    919           1.73      yamt 		/*
    920           1.73      yamt 		 * see if we've met the free target.
    921           1.73      yamt 		 */
    922           1.73      yamt 
    923   1.93.4.2.4.3      matt 		if (grp->pgrp_free + grp->pgrp_paging
    924           1.89        ad #if defined(VMSWAP)
    925           1.89        ad 		    + swapcluster_nused(&swc)
    926           1.89        ad #endif /* defined(VMSWAP) */
    927   1.93.4.2.4.3      matt 		    >= grp->pgrp_freetarg << 2 ||
    928           1.73      yamt 		    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
    929   1.93.4.2.4.6      matt 			UVMHIST_LOG(pdhist,"  [%zd]: met free target (%u + %u)"
    930   1.93.4.2.4.6      matt 			    ", dirty reacts %u",
    931   1.93.4.2.4.6      matt 			    grp - uvm.pggroups, grp->pgrp_free,
    932   1.93.4.2.4.6      matt 			    grp->pgrp_paging, dirtyreacts);
    933           1.73      yamt 			break;
    934           1.73      yamt 		}
    935           1.24       chs 
    936   1.93.4.2.4.3      matt 		pg = uvmpdpol_selectvictim(grp);
    937   1.93.4.2.4.3      matt 		if (pg == NULL) {
    938   1.93.4.2.4.6      matt 			UVMHIST_LOG(pdhist,"  [%zd]: selectvictim didn't",
    939   1.93.4.2.4.6      matt 			    grp - uvm.pggroups, 0, 0, 0);
    940           1.77      yamt 			break;
    941           1.77      yamt 		}
    942   1.93.4.2.4.6      matt 		victims++;
    943   1.93.4.2.4.3      matt 		KASSERT(uvmpdpol_pageisqueued_p(pg));
    944   1.93.4.2.4.3      matt 		KASSERT(pg->wire_count == 0);
    945           1.77      yamt 
    946           1.73      yamt 		/*
    947           1.73      yamt 		 * we are below target and have a new page to consider.
    948           1.73      yamt 		 */
    949           1.30       chs 
    950   1.93.4.2.4.3      matt 		anon = pg->uanon;
    951   1.93.4.2.4.3      matt 		uobj = pg->uobject;
    952            1.8       mrg 
    953           1.73      yamt 		/*
    954           1.73      yamt 		 * first we attempt to lock the object that this page
    955           1.73      yamt 		 * belongs to.  if our attempt fails we skip on to
    956           1.73      yamt 		 * the next page (no harm done).  it is important to
    957           1.73      yamt 		 * "try" locking the object as we are locking in the
    958           1.73      yamt 		 * wrong order (pageq -> object) and we don't want to
    959           1.73      yamt 		 * deadlock.
    960           1.73      yamt 		 *
    961           1.73      yamt 		 * the only time we expect to see an ownerless page
    962           1.73      yamt 		 * (i.e. a page with no uobject and !PQ_ANON) is if an
    963           1.73      yamt 		 * anon has loaned a page from a uvm_object and the
    964           1.73      yamt 		 * uvm_object has dropped the ownership.  in that
    965           1.73      yamt 		 * case, the anon can "take over" the loaned page
    966           1.73      yamt 		 * and make it its own.
    967           1.73      yamt 		 */
    968           1.30       chs 
    969   1.93.4.2.4.3      matt 		slock = uvmpd_trylockowner(pg);
    970           1.76      yamt 		if (slock == NULL) {
    971           1.89        ad 			/*
    972           1.89        ad 			 * yield cpu to make a chance for an LWP holding
    973           1.89        ad 			 * the lock run.  otherwise we can busy-loop too long
    974           1.89        ad 			 * if the page queue is filled with a lot of pages
    975           1.89        ad 			 * from few objects.
    976           1.89        ad 			 */
    977           1.89        ad 			lockownerfail++;
    978           1.89        ad 			if (lockownerfail > UVMPD_NUMTRYLOCKOWNER) {
    979           1.89        ad 				mutex_exit(&uvm_pageqlock);
    980           1.89        ad 				/* XXX Better than yielding but inadequate. */
    981           1.89        ad 				kpause("livelock", false, 1, NULL);
    982           1.89        ad 				mutex_enter(&uvm_pageqlock);
    983           1.89        ad 				lockownerfail = 0;
    984           1.89        ad 			}
    985           1.76      yamt 			continue;
    986           1.76      yamt 		}
    987   1.93.4.2.4.3      matt 		if (pg->flags & PG_BUSY) {
    988           1.89        ad 			mutex_exit(slock);
    989   1.93.4.2.4.6      matt 			busy++;
    990           1.76      yamt 			continue;
    991           1.76      yamt 		}
    992           1.76      yamt 
    993           1.73      yamt 		/* does the page belong to an object? */
    994           1.73      yamt 		if (uobj != NULL) {
    995   1.93.4.2.4.3      matt 			grp->pgrp_pdobscan++;
    996           1.73      yamt 		} else {
    997           1.73      yamt #if defined(VMSWAP)
    998           1.73      yamt 			KASSERT(anon != NULL);
    999   1.93.4.2.4.3      matt 			grp->pgrp_pdanscan++;
   1000           1.68      yamt #else /* defined(VMSWAP) */
   1001           1.73      yamt 			panic("%s: anon", __func__);
   1002           1.68      yamt #endif /* defined(VMSWAP) */
   1003           1.73      yamt 		}
   1004            1.8       mrg 
   1005           1.37       chs 
   1006           1.73      yamt 		/*
   1007           1.73      yamt 		 * we now have the object and the page queues locked.
   1008           1.73      yamt 		 * if the page is not swap-backed, call the object's
   1009           1.73      yamt 		 * pager to flush and free the page.
   1010           1.73      yamt 		 */
   1011           1.37       chs 
   1012           1.69      yamt #if defined(READAHEAD_STATS)
   1013   1.93.4.2.4.3      matt 		if ((pg->pqflags & PQ_READAHEAD) != 0) {
   1014   1.93.4.2.4.3      matt 			pg->pqflags &= ~PQ_READAHEAD;
   1015           1.73      yamt 			uvm_ra_miss.ev_count++;
   1016           1.73      yamt 		}
   1017           1.69      yamt #endif /* defined(READAHEAD_STATS) */
   1018           1.69      yamt 
   1019   1.93.4.2.4.3      matt 		if ((pg->pqflags & PQ_SWAPBACKED) == 0) {
   1020           1.82       alc 			KASSERT(uobj != NULL);
   1021           1.89        ad 			mutex_exit(&uvm_pageqlock);
   1022   1.93.4.2.4.3      matt 			(void) (uobj->pgops->pgo_put)(uobj, pg->offset,
   1023   1.93.4.2.4.3      matt 			    pg->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
   1024   1.93.4.2.4.6      matt 			grp->pgrp_pdputs++;
   1025           1.89        ad 			mutex_enter(&uvm_pageqlock);
   1026           1.73      yamt 			continue;
   1027           1.73      yamt 		}
   1028           1.37       chs 
   1029           1.73      yamt 		/*
   1030           1.73      yamt 		 * the page is swap-backed.  remove all the permissions
   1031           1.73      yamt 		 * from the page so we can sync the modified info
   1032           1.73      yamt 		 * without any race conditions.  if the page is clean
   1033           1.73      yamt 		 * we can free it now and continue.
   1034           1.73      yamt 		 */
   1035            1.8       mrg 
   1036   1.93.4.2.4.3      matt 		pmap_page_protect(pg, VM_PROT_NONE);
   1037   1.93.4.2.4.3      matt 		if ((pg->flags & PG_CLEAN) && pmap_clear_modify(pg)) {
   1038   1.93.4.2.4.3      matt 			pg->flags &= ~(PG_CLEAN);
   1039           1.73      yamt 		}
   1040   1.93.4.2.4.3      matt 		if (pg->flags & PG_CLEAN) {
   1041           1.73      yamt 			int slot;
   1042           1.73      yamt 			int pageidx;
   1043           1.73      yamt 
   1044   1.93.4.2.4.3      matt 			pageidx = pg->offset >> PAGE_SHIFT;
   1045   1.93.4.2.4.3      matt 			KASSERT(!uvmpdpol_pageisqueued_p(pg));
   1046   1.93.4.2.4.3      matt 			uvm_pagefree(pg);
   1047   1.93.4.2.4.6      matt 			freed++;
   1048            1.8       mrg 
   1049            1.8       mrg 			/*
   1050           1.73      yamt 			 * for anons, we need to remove the page
   1051           1.73      yamt 			 * from the anon ourselves.  for aobjs,
   1052           1.73      yamt 			 * pagefree did that for us.
   1053            1.8       mrg 			 */
   1054           1.24       chs 
   1055           1.73      yamt 			if (anon) {
   1056           1.73      yamt 				KASSERT(anon->an_swslot != 0);
   1057           1.73      yamt 				anon->an_page = NULL;
   1058           1.73      yamt 				slot = anon->an_swslot;
   1059           1.73      yamt 			} else {
   1060           1.73      yamt 				slot = uao_find_swslot(uobj, pageidx);
   1061            1.8       mrg 			}
   1062           1.89        ad 			mutex_exit(slock);
   1063            1.8       mrg 
   1064           1.73      yamt 			if (slot > 0) {
   1065           1.73      yamt 				/* this page is now only in swap. */
   1066           1.87        ad 				mutex_enter(&uvm_swap_data_lock);
   1067           1.73      yamt 				KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
   1068           1.73      yamt 				uvmexp.swpgonly++;
   1069           1.87        ad 				mutex_exit(&uvm_swap_data_lock);
   1070           1.37       chs 			}
   1071           1.73      yamt 			continue;
   1072           1.73      yamt 		}
   1073           1.37       chs 
   1074           1.77      yamt #if defined(VMSWAP)
   1075           1.73      yamt 		/*
   1076           1.73      yamt 		 * this page is dirty, skip it if we'll have met our
   1077           1.73      yamt 		 * free target when all the current pageouts complete.
   1078           1.73      yamt 		 */
   1079           1.24       chs 
   1080   1.93.4.2.4.3      matt 		if (grp->pgrp_free + grp->pgrp_paging > grp->pgrp_freetarg << 2) {
   1081           1.89        ad 			mutex_exit(slock);
   1082           1.73      yamt 			continue;
   1083           1.73      yamt 		}
   1084           1.14       chs 
   1085           1.73      yamt 		/*
   1086           1.73      yamt 		 * free any swap space allocated to the page since
   1087           1.73      yamt 		 * we'll have to write it again with its new data.
   1088           1.73      yamt 		 */
   1089           1.24       chs 
   1090   1.93.4.2.4.3      matt 		uvmpd_dropswap(pg);
   1091           1.14       chs 
   1092           1.73      yamt 		/*
   1093           1.73      yamt 		 * start new swap pageout cluster (if necessary).
   1094       1.93.4.1       snj 		 *
   1095       1.93.4.1       snj 		 * if swap is full reactivate this page so that
   1096       1.93.4.1       snj 		 * we eventually cycle all pages through the
   1097       1.93.4.1       snj 		 * inactive queue.
   1098            1.8       mrg 		 */
   1099           1.24       chs 
   1100           1.73      yamt 		if (swapcluster_allocslots(&swc)) {
   1101       1.93.4.1       snj 			dirtyreacts++;
   1102   1.93.4.2.4.3      matt 			uvm_pageactivate(pg);
   1103           1.89        ad 			mutex_exit(slock);
   1104           1.73      yamt 			continue;
   1105            1.8       mrg 		}
   1106            1.8       mrg 
   1107            1.8       mrg 		/*
   1108           1.73      yamt 		 * at this point, we're definitely going reuse this
   1109           1.73      yamt 		 * page.  mark the page busy and delayed-free.
   1110           1.73      yamt 		 * we should remove the page from the page queues
   1111           1.73      yamt 		 * so we don't ever look at it again.
   1112           1.73      yamt 		 * adjust counters and such.
   1113            1.8       mrg 		 */
   1114            1.8       mrg 
   1115   1.93.4.2.4.3      matt 		pg->flags |= PG_BUSY;
   1116   1.93.4.2.4.8      matt 		UVM_PAGE_OWN(pg, "scan_queue", NULL);
   1117           1.73      yamt 
   1118   1.93.4.2.4.3      matt 		pg->flags |= PG_PAGEOUT;
   1119   1.93.4.2.4.3      matt 		uvm_pagedequeue(pg);
   1120           1.73      yamt 
   1121   1.93.4.2.4.3      matt 		grp->pgrp_pgswapout++;
   1122           1.89        ad 		mutex_exit(&uvm_pageqlock);
   1123            1.8       mrg 
   1124            1.8       mrg 		/*
   1125           1.73      yamt 		 * add the new page to the cluster.
   1126            1.8       mrg 		 */
   1127            1.8       mrg 
   1128   1.93.4.2.4.3      matt 		if (swapcluster_add(&swc, pg)) {
   1129   1.93.4.2.4.3      matt 			pg->flags &= ~(PG_BUSY|PG_PAGEOUT);
   1130   1.93.4.2.4.8      matt 			UVM_PAGE_OWN(pg, NULL, NULL);
   1131           1.89        ad 			mutex_enter(&uvm_pageqlock);
   1132           1.77      yamt 			dirtyreacts++;
   1133   1.93.4.2.4.3      matt 			uvm_pageactivate(pg);
   1134           1.89        ad 			mutex_exit(slock);
   1135           1.73      yamt 			continue;
   1136           1.73      yamt 		}
   1137           1.89        ad 		mutex_exit(slock);
   1138           1.73      yamt 
   1139   1.93.4.2.4.3      matt 		swapcluster_flush(grp, &swc, false);
   1140           1.89        ad 		mutex_enter(&uvm_pageqlock);
   1141           1.73      yamt 
   1142            1.8       mrg 		/*
   1143           1.31       chs 		 * the pageout is in progress.  bump counters and set up
   1144           1.31       chs 		 * for the next loop.
   1145            1.8       mrg 		 */
   1146            1.8       mrg 
   1147           1.31       chs 		uvmexp.pdpending++;
   1148           1.77      yamt #else /* defined(VMSWAP) */
   1149   1.93.4.2.4.3      matt 		uvm_pageactivate(pg);
   1150           1.89        ad 		mutex_exit(slock);
   1151           1.77      yamt #endif /* defined(VMSWAP) */
   1152           1.73      yamt 	}
   1153           1.73      yamt 
   1154   1.93.4.2.4.6      matt 	UVMHIST_LOG(pdhist,"  [%zd] <-- done: %u victims: %u freed, %u busy",
   1155   1.93.4.2.4.6      matt 	    grp - uvm.pggroups, victims, freed, busy);
   1156   1.93.4.2.4.6      matt 
   1157   1.93.4.2.4.6      matt 	grp->pgrp_pdvictims += victims;
   1158   1.93.4.2.4.6      matt 	grp->pgrp_pdnullscans += (victims == 0);
   1159   1.93.4.2.4.6      matt 	grp->pgrp_pdfreed += freed;
   1160   1.93.4.2.4.6      matt 	grp->pgrp_pdbusy += busy;
   1161   1.93.4.2.4.6      matt 
   1162           1.73      yamt #if defined(VMSWAP)
   1163           1.89        ad 	mutex_exit(&uvm_pageqlock);
   1164   1.93.4.2.4.3      matt 	swapcluster_flush(grp, &swc, true);
   1165           1.89        ad 	mutex_enter(&uvm_pageqlock);
   1166           1.68      yamt #endif /* defined(VMSWAP) */
   1167            1.1       mrg }
   1168            1.1       mrg 
   1169            1.1       mrg /*
   1170            1.1       mrg  * uvmpd_scan: scan the page queues and attempt to meet our targets.
   1171            1.1       mrg  *
   1172            1.1       mrg  * => called with pageq's locked
   1173            1.1       mrg  */
   1174            1.1       mrg 
   1175  1.93.4.2.4.10      matt static bool
   1176   1.93.4.2.4.3      matt uvmpd_scan(struct uvm_pggroup *grp)
   1177            1.1       mrg {
   1178   1.93.4.2.4.3      matt 	u_int swap_shortage, pages_freed;
   1179            1.8       mrg 	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);
   1180            1.1       mrg 
   1181   1.93.4.2.4.3      matt 	grp->pgrp_pdrevs++;
   1182            1.1       mrg 
   1183            1.8       mrg 	/*
   1184           1.93        ad 	 * work on meeting our targets.   first we work on our free target
   1185           1.93        ad 	 * by converting inactive pages into free pages.  then we work on
   1186           1.93        ad 	 * meeting our inactive target by converting active pages to
   1187           1.93        ad 	 * inactive ones.
   1188            1.8       mrg 	 */
   1189            1.8       mrg 
   1190            1.8       mrg 	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);
   1191            1.8       mrg 
   1192   1.93.4.2.4.3      matt 	pages_freed = grp->pgrp_pdfreed;
   1193   1.93.4.2.4.3      matt 	uvmpd_scan_queue(grp);
   1194   1.93.4.2.4.3      matt 	pages_freed = grp->pgrp_pdfreed - pages_freed;
   1195            1.8       mrg 
   1196            1.8       mrg 	/*
   1197           1.14       chs 	 * detect if we're not going to be able to page anything out
   1198           1.14       chs 	 * until we free some swap resources from active pages.
   1199           1.14       chs 	 */
   1200           1.24       chs 
   1201           1.14       chs 	swap_shortage = 0;
   1202  1.93.4.2.4.10      matt 	if (pages_freed == 0
   1203  1.93.4.2.4.10      matt 	    && grp->pgrp_free < grp->pgrp_freetarg
   1204  1.93.4.2.4.10      matt 	    && uvmexp.swpginuse >= uvmexp.swpgavail
   1205  1.93.4.2.4.10      matt 	    && !uvm_swapisfull()) {
   1206   1.93.4.2.4.3      matt 		swap_shortage = grp->pgrp_freetarg - grp->pgrp_free;
   1207           1.14       chs 	}
   1208           1.24       chs 
   1209   1.93.4.2.4.3      matt 	uvmpdpol_balancequeue(grp, swap_shortage);
   1210           1.93        ad 
   1211           1.93        ad 	/*
   1212           1.93        ad 	 * swap out some processes if we are still below the minimum
   1213           1.93        ad 	 * free target.  we need to unlock the page queues for this.
   1214           1.93        ad 	 */
   1215           1.93        ad 
   1216  1.93.4.2.4.10      matt #ifdef VMSWAP
   1217   1.93.4.2.4.3      matt 	if (grp->pgrp_free < grp->pgrp_freemin
   1218   1.93.4.2.4.3      matt 	    && uvmexp.nswapdev != 0 && uvm.swapout_enabled) {
   1219   1.93.4.2.4.3      matt 		grp->pgrp_pdswout++;
   1220           1.93        ad 		UVMHIST_LOG(pdhist,"  free %d < min %d: swapout",
   1221           1.93        ad 		    uvmexp.free, uvmexp.freemin, 0, 0);
   1222           1.93        ad 		mutex_exit(&uvm_pageqlock);
   1223           1.93        ad 		uvm_swapout_threads();
   1224           1.93        ad 		mutex_enter(&uvm_pageqlock);
   1225           1.93        ad 
   1226           1.93        ad 	}
   1227  1.93.4.2.4.10      matt #endif /* VMSWAP */
   1228  1.93.4.2.4.10      matt 
   1229  1.93.4.2.4.10      matt 	return pages_freed != 0;
   1230            1.1       mrg }
   1231           1.62      yamt 
   1232           1.62      yamt /*
   1233           1.62      yamt  * uvm_reclaimable: decide whether to wait for pagedaemon.
   1234           1.62      yamt  *
   1235           1.84   thorpej  * => return true if it seems to be worth to do uvm_wait.
   1236           1.62      yamt  *
   1237           1.62      yamt  * XXX should be tunable.
   1238           1.62      yamt  * XXX should consider pools, etc?
   1239           1.62      yamt  */
   1240           1.62      yamt 
   1241           1.83   thorpej bool
   1242   1.93.4.2.4.9      matt uvm_reclaimable(u_int color, bool kmem_p)
   1243           1.62      yamt {
   1244  1.93.4.2.4.11      matt 	KASSERT(color < uvmexp.ncolors);
   1245  1.93.4.2.4.11      matt 
   1246           1.62      yamt 	/*
   1247           1.62      yamt 	 * if swap is not full, no problem.
   1248           1.62      yamt 	 */
   1249   1.93.4.2.4.9      matt #ifdef VMSWAP
   1250           1.62      yamt 	if (!uvm_swapisfull()) {
   1251  1.93.4.2.4.12      matt 		KASSERT(uvmexp.nswapdev > 0);
   1252           1.84   thorpej 		return true;
   1253           1.62      yamt 	}
   1254   1.93.4.2.4.9      matt #endif
   1255           1.62      yamt 
   1256           1.62      yamt 	/*
   1257           1.62      yamt 	 * file-backed pages can be reclaimed even when swap is full.
   1258           1.62      yamt 	 * if we have more than 1/16 of pageable memory or 5MB, try to reclaim.
   1259           1.62      yamt 	 *
   1260           1.62      yamt 	 * XXX assume the worst case, ie. all wired pages are file-backed.
   1261           1.63      yamt 	 *
   1262           1.63      yamt 	 * XXX should consider about other reclaimable memory.
   1263           1.63      yamt 	 * XXX ie. pools, traditional buffer cache.
   1264           1.62      yamt 	 */
   1265  1.93.4.2.4.12      matt 	u_int active = 0;
   1266  1.93.4.2.4.12      matt 	u_int inactive = 0;
   1267  1.93.4.2.4.12      matt 	u_int filepages = 0;
   1268  1.93.4.2.4.12      matt 	u_int npages = 0;
   1269   1.93.4.2.4.9      matt 	for (u_int lcv = 0; lcv < VM_NFREELIST; lcv++) {
   1270   1.93.4.2.4.9      matt 		struct uvm_pggroup * const grp =
   1271   1.93.4.2.4.9      matt 		    uvm.page_free[color].pgfl_pggroups[lcv];
   1272           1.62      yamt 
   1273   1.93.4.2.4.9      matt #ifdef VM_FREELIST_NORMALOK_P
   1274   1.93.4.2.4.9      matt 		/*
   1275   1.93.4.2.4.9      matt 		 * If this for kmem and it's a normal freelist, skip it.
   1276   1.93.4.2.4.9      matt 		 */
   1277   1.93.4.2.4.9      matt 		if (kmem_p && VM_FREELIST_NORMALOK_P(lcv))
   1278   1.93.4.2.4.9      matt 			continue;
   1279   1.93.4.2.4.9      matt #endif
   1280   1.93.4.2.4.9      matt 
   1281   1.93.4.2.4.9      matt 		npages += grp->pgrp_npages;
   1282   1.93.4.2.4.9      matt 		filepages += grp->pgrp_filepages + grp->pgrp_execpages;
   1283   1.93.4.2.4.9      matt 		uvm_estimatepageable(grp, &active, &inactive);
   1284   1.93.4.2.4.9      matt 	}
   1285   1.93.4.2.4.9      matt 	filepages -= uvmexp.wired;
   1286   1.93.4.2.4.9      matt 	/*
   1287   1.93.4.2.4.9      matt 	 *
   1288   1.93.4.2.4.9      matt 	 */
   1289   1.93.4.2.4.9      matt 	if (filepages >= MIN((active + inactive) >> 4, npages / 25)) {
   1290           1.84   thorpej 		return true;
   1291           1.62      yamt 	}
   1292           1.62      yamt 
   1293           1.62      yamt 	/*
   1294           1.62      yamt 	 * kill the process, fail allocation, etc..
   1295           1.62      yamt 	 */
   1296           1.62      yamt 
   1297           1.84   thorpej 	return false;
   1298           1.62      yamt }
   1299           1.77      yamt 
   1300           1.77      yamt void
   1301   1.93.4.2.4.9      matt uvm_estimatepageable(const struct uvm_pggroup *grp,
   1302  1.93.4.2.4.12      matt 	u_int *activep, u_int *inactivep)
   1303           1.77      yamt {
   1304           1.77      yamt 
   1305  1.93.4.2.4.12      matt 	uvmpdpol_estimatepageable(grp, activep, inactivep);
   1306           1.77      yamt }
   1307