/*	$NetBSD: uvm_pdpolicy_clock.c,v 1.12.16.6 2012/04/12 19:39:55 matt Exp $	*/
/*	NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#if defined(PDSIM)

#include "pdsim.h"

#else /* defined(PDSIM) */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.12.16.6 2012/04/12 19:39:55 matt Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pdpolicy_impl.h>

#endif /* defined(PDSIM) */

#define PQ_INACTIVE	PQ_PRIVATE1	/* page is in inactive list */
#define PQ_ACTIVE	PQ_PRIVATE2	/* page is in active list */
#define	PQ_RADIOACTIVE	PQ_PRIVATE3	/* page is in radioactive list */

#if !defined(CLOCK_INACTIVEPCT)
#define	CLOCK_INACTIVEPCT	33
#endif /* !defined(CLOCK_INACTIVEPCT) */

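/*
 * Scan state and per-group queues for the clock page-replacement
 * policy.  Each page group keeps three lists: active pages (recently
 * referenced), inactive pages (reclaim candidates between the clock
 * hands), and radioactive pages (swap-backed pages that cannot be
 * paged out while no swap device is configured).
 */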
struct uvmpdpol_scanstate {
	struct vm_page *ss_nextpg;
	bool ss_first;
	bool ss_anonreact, ss_filereact, ss_execreact;
};

struct uvmpdpol_groupstate {
	struct pglist gs_activeq;	/* allocated pages, in use */
	struct pglist gs_inactiveq;	/* pages between the clock hands */
	struct pglist gs_radioactiveq;	/* swap-backed pages, no swap configured */
	u_int gs_active;
	u_int gs_radioactive;
	u_int gs_inactive;
	u_int gs_inactarg;
	struct uvmpdpol_scanstate gs_scanstate;
};

struct uvmpdpol_globalstate {
	struct uvmpdpol_groupstate *s_pggroups;
	struct uvm_pctparam s_anonmin;
	struct uvm_pctparam s_filemin;
	struct uvm_pctparam s_execmin;
	struct uvm_pctparam s_anonmax;
	struct uvm_pctparam s_filemax;
	struct uvm_pctparam s_execmax;
	struct uvm_pctparam s_inactivepct;
};


static struct uvmpdpol_globalstate pdpol_state;

PDPOL_EVCNT_DEFINE(reactexec)
PDPOL_EVCNT_DEFINE(reactfile)
PDPOL_EVCNT_DEFINE(reactanon)

#ifdef DEBUG
static size_t
clock_pglist_count(struct pglist *pglist)
{
	size_t count = 0;
	struct vm_page *pg;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
		count++;
	}
	return count;
}
#endif

static size_t
clock_space(void)
{
	return sizeof(struct uvmpdpol_groupstate);
}

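/*
 * clock_tune: recompute the group's inactive-list target as a
 * percentage (s_inactivepct) of its pageable (active + inactive)
 * pages, keeping it strictly above the group's free-page target.
 */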
static void
clock_tune(struct uvm_pggroup *grp)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	gs->gs_inactarg = UVM_PCTPARAM_APPLY(&s->s_inactivepct,
	    gs->gs_active + gs->gs_inactive);
	if (gs->gs_inactarg <= grp->pgrp_freetarg) {
		gs->gs_inactarg = grp->pgrp_freetarg + 1;
	}
}

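/*
 * uvmpdpol_scaninit: prepare a scan of the group's inactive queue.
 * Decide, from the per-type min/max usage limits, whether anon, file
 * and exec pages should be reactivated rather than reclaimed.
 */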
void
uvmpdpol_scaninit(struct uvm_pggroup *grp)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
	struct uvmpdpol_scanstate * const ss = &gs->gs_scanstate;
	bool anonunder, fileunder, execunder;
	bool anonover, fileover, execover;
	bool anonreact, filereact, execreact;

	/*
	 * decide which types of pages we want to reactivate instead of freeing
	 * to keep usage within the minimum and maximum usage limits.
	 */

	u_int t = gs->gs_active + gs->gs_inactive + grp->pgrp_free;
	anonunder = grp->pgrp_anonpages <= UVM_PCTPARAM_APPLY(&s->s_anonmin, t);
	fileunder = grp->pgrp_filepages <= UVM_PCTPARAM_APPLY(&s->s_filemin, t);
	execunder = grp->pgrp_execpages <= UVM_PCTPARAM_APPLY(&s->s_execmin, t);
	anonover = grp->pgrp_anonpages > UVM_PCTPARAM_APPLY(&s->s_anonmax, t);
	fileover = grp->pgrp_filepages > UVM_PCTPARAM_APPLY(&s->s_filemax, t);
	execover = grp->pgrp_execpages > UVM_PCTPARAM_APPLY(&s->s_execmax, t);
	anonreact = anonunder || (!anonover && (fileover || execover));
	filereact = fileunder || (!fileover && (anonover || execover));
	execreact = execunder || (!execover && (anonover || fileover));
	if (filereact && execreact && (anonreact || uvm_swapisfull())) {
		anonreact = filereact = execreact = false;
	}
	ss->ss_anonreact = anonreact;
	ss->ss_filereact = filereact;
	ss->ss_execreact = execreact;

	ss->ss_first = true;
}

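/*
 * uvmpdpol_selectvictim: pick the next page to reclaim.  Walks the
 * group's inactive queue, reactivating referenced pages and pages
 * whose type (exec/file/anon) is below its usage minimum, and returns
 * the first remaining candidate, or NULL when the queue is exhausted.
 */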
struct vm_page *
uvmpdpol_selectvictim(struct uvm_pggroup *grp)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
	struct uvmpdpol_scanstate * const ss = &gs->gs_scanstate;
	struct vm_page *pg;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);

	KASSERT(mutex_owned(&uvm_pageqlock));

	while (/* CONSTCOND */ 1) {
		struct vm_anon *anon;
		struct uvm_object *uobj;

		//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

		if (ss->ss_first) {
			pg = TAILQ_FIRST(&gs->gs_inactiveq);
			ss->ss_first = false;
			UVMHIST_LOG(pdhist, "  select first inactive page: %p",
			    pg, 0, 0, 0);
		} else {
			pg = ss->ss_nextpg;
			if (pg != NULL && (pg->pqflags & PQ_INACTIVE) == 0) {
				pg = TAILQ_FIRST(&gs->gs_inactiveq);
			}
			UVMHIST_LOG(pdhist, "  select next inactive page: %p",
			    pg, 0, 0, 0);
		}
		if (pg == NULL) {
			break;
		}
		ss->ss_nextpg = TAILQ_NEXT(pg, pageq.queue);

		grp->pgrp_pdscans++;

		/*
		 * move referenced pages back to active queue and
		 * skip to next page.
		 */

		if (pmap_is_referenced(pg)) {
			uvmpdpol_pageactivate(pg);
			grp->pgrp_pdreact++;
			continue;
		}

		anon = pg->uanon;
		uobj = pg->uobject;

		/*
		 * enforce the minimum thresholds on different
		 * types of memory usage.  if reusing the current
		 * page would reduce that type of usage below its
		 * minimum, reactivate the page instead and move
		 * on to the next page.
		 */

		if (uobj && UVM_OBJ_IS_VTEXT(uobj) && ss->ss_execreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactexec);
			continue;
		}
		if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
		    !UVM_OBJ_IS_VTEXT(uobj) && ss->ss_filereact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactfile);
			continue;
		}
		if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && ss->ss_anonreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactanon);
			continue;
		}

		break;
	}

	return pg;
}

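/*
 * uvmpdpol_balancequeue: second pass of the pagedaemon scan.  Frees
 * swap space from active pages when swap is short, and deactivates
 * active pages until the group's inactive target is met.
 */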
void
uvmpdpol_balancequeue(struct uvm_pggroup *grp, u_int swap_shortage)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	struct vm_page *pg, *nextpg;

	/*
	 * we have done the scan to get free pages.   now we work on meeting
	 * our inactive target.
	 */

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	/*
	 * If swap was added, move all the pages from the radioactive queue
	 * to the active queue.
	 */
#ifdef VMSWAP
	if (uvmexp.nswapdev > 0) {
		while ((pg = TAILQ_FIRST(&gs->gs_radioactiveq)) != NULL) {
			uvmpdpol_pageactivate(pg);
		}
	}
#endif

	/* clamp at zero: gs_inactive may already exceed the target */
	u_int inactive_shortage = 0;
	if (gs->gs_inactarg > gs->gs_inactive) {
		inactive_shortage = gs->gs_inactarg - gs->gs_inactive;
	}
	for (pg = TAILQ_FIRST(&gs->gs_activeq);
	     pg != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     pg = nextpg) {
		nextpg = TAILQ_NEXT(pg, pageq.queue);

		/*
		 * if there's a shortage of swap slots, try to free it.
		 */

#ifdef VMSWAP
		if (swap_shortage > 0 && (pg->pqflags & PQ_SWAPBACKED) != 0) {
			if (uvmpd_trydropswap(pg)) {
				swap_shortage--;
			}
		}
#endif

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */
		if (inactive_shortage > 0) {
			/* no need to check wire_count as pg is "active" */
			uvmpdpol_pagedeactivate(pg);
			grp->pgrp_pddeact++;
			inactive_shortage--;
		}
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
}

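/*
 * uvmpdpol_pagedeactivate: move a page from the active (or
 * radioactive) queue to the tail of the inactive queue, clearing its
 * pmap reference bit so a later scan can detect reuse.
 */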
void
uvmpdpol_pagedeactivate(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

#if 0
	/*
	 * If there is no swap available and the page is anonymous without
	 * a backing store, don't bother marking it INACTIVE since it would
	 * only be a "dirty reactivation".
	 */
	if (uvmexp.nswapdev < 1 && (pg->pqflags & PQ_SWAPBACKED) != 0) {
		KASSERT(pg->pqflags & PQ_RADIOACTIVE);
		return;
	}
#else
	KASSERT((pg->pqflags & PQ_RADIOACTIVE) == 0);
#endif

	KASSERT(!(pg->pqflags & PQ_FREE));
	KASSERT(mutex_owned(&uvm_pageqlock));

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&gs->gs_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(gs->gs_active > 0);
		gs->gs_active--;
		grp->pgrp_active--;
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
	//KDASSERT(gs->gs_radioactive == clock_pglist_count(&gs->gs_radioactiveq));

	if (pg->pqflags & PQ_RADIOACTIVE) {
		TAILQ_REMOVE(&gs->gs_radioactiveq, pg, pageq.queue);
		pg->pqflags &= ~PQ_RADIOACTIVE;
		KASSERT(gs->gs_radioactive > 0);
		gs->gs_radioactive--;
		grp->pgrp_active--;
	}

	//KDASSERT(gs->gs_radioactive == clock_pglist_count(&gs->gs_radioactiveq));
	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

	if ((pg->pqflags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		pmap_clear_reference(pg);
		TAILQ_INSERT_TAIL(&gs->gs_inactiveq, pg, pageq.queue);
		pg->pqflags |= PQ_INACTIVE;
		gs->gs_inactive++;
		grp->pgrp_inactive++;
	}

	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));
}

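/*
 * uvmpdpol_pageactivate: requeue a page at the tail of the active
 * queue.  Swap-backed pages are parked on the radioactive queue
 * instead while no swap device is configured, since they cannot be
 * paged out.
 */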
void
uvmpdpol_pageactivate(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	KASSERT(!(pg->pqflags & PQ_FREE));
	KASSERT(mutex_owned(&uvm_pageqlock));

	uvmpdpol_pagedequeue(pg);
	if (uvmexp.nswapdev < 1 && (pg->pqflags & PQ_SWAPBACKED) != 0) {
		TAILQ_INSERT_TAIL(&gs->gs_radioactiveq, pg, pageq.queue);
		pg->pqflags |= PQ_RADIOACTIVE;
		gs->gs_radioactive++;
		/*
		 * radioactive pages count as active in pgrp_active; the
		 * dequeue and deactivate paths decrement it for them.
		 */
		grp->pgrp_active++;
	} else {
		TAILQ_INSERT_TAIL(&gs->gs_activeq, pg, pageq.queue);
		pg->pqflags |= PQ_ACTIVE;
		gs->gs_active++;
		grp->pgrp_active++;
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
}

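/*
 * uvmpdpol_pagedequeue: remove a page from whichever policy queue it
 * is on and update the matching counters.
 */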
void
uvmpdpol_pagedequeue(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	KASSERT(!(pg->pqflags & PQ_FREE));
	KASSERT(mutex_owned(&uvm_pageqlock));
	//KDASSERT(gs->gs_radioactive == clock_pglist_count(&gs->gs_radioactiveq));

	if (pg->pqflags & PQ_RADIOACTIVE) {
		TAILQ_REMOVE(&gs->gs_radioactiveq, pg, pageq.queue);
		pg->pqflags &= ~PQ_RADIOACTIVE;
		KASSERT(gs->gs_radioactive > 0);
		gs->gs_radioactive--;
		grp->pgrp_active--;
	}

	//KDASSERT(gs->gs_radioactive == clock_pglist_count(&gs->gs_radioactiveq));
	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&gs->gs_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(gs->gs_active > 0);
		gs->gs_active--;
		grp->pgrp_active--;
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

	if (pg->pqflags & PQ_INACTIVE) {
		TAILQ_REMOVE(&gs->gs_inactiveq, pg, pageq.queue);
		pg->pqflags &= ~PQ_INACTIVE;
		KASSERT(gs->gs_inactive > 0);
		gs->gs_inactive--;
		grp->pgrp_inactive--;
	}

	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));
}

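/*
 * uvmpdpol_pageenqueue: newly managed pages enter the policy via the
 * active queue.
 */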
void
uvmpdpol_pageenqueue(struct vm_page *pg)
{

	uvmpdpol_pageactivate(pg);
}

void
uvmpdpol_anfree(struct vm_anon *an)
{
}

bool
uvmpdpol_pageisqueued_p(struct vm_page *pg)
{

	return (pg->pqflags & (PQ_RADIOACTIVE | PQ_ACTIVE | PQ_INACTIVE)) != 0;
}

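/*
 * uvmpdpol_estimatepageable: report active and inactive page counts,
 * either accumulated for a single group or summed over all groups.
 * Radioactive pages are counted as active.
 */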
void
uvmpdpol_estimatepageable(const struct uvm_pggroup *grp,
	u_int *activep, u_int *inactivep)
{
	if (grp != NULL) {
		struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
		if (activep) {
			*activep += gs->gs_active + gs->gs_radioactive;
		}
		if (inactivep) {
			*inactivep += gs->gs_inactive;
		}
		return;
	}

	u_int active = 0, inactive = 0;
	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

		//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
		//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

		active += gs->gs_active + gs->gs_radioactive;
		inactive += gs->gs_inactive;
	}

	if (activep) {
		*activep = active;
	}
	if (inactivep) {
		*inactivep = inactive;
	}
}

#if !defined(PDSIM)
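/*
 * min_check: sysctl validation hook; reject settings that would push
 * the sum of anonmin, filemin and execmin above 95%.
 */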
static int
min_check(struct uvm_pctparam *pct, int t)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	u_int total = t;

	if (pct != &s->s_anonmin) {
		total += uvm_pctparam_get(&s->s_anonmin);
	}
	if (pct != &s->s_filemin) {
		total += uvm_pctparam_get(&s->s_filemin);
	}
	if (pct != &s->s_execmin) {
		total += uvm_pctparam_get(&s->s_execmin);
	}
	if (total > 95) {
		return EINVAL;
	}
	return 0;
}
#endif /* !defined(PDSIM) */

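/*
 * uvmpdpol_init: attach per-group policy state to each page group,
 * set the default tunables, and prime the per-group scan state.
 */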
void
uvmpdpol_init(void *new_gs, size_t npggroups)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate *gs = new_gs;

	s->s_pggroups = gs;

	struct uvm_pggroup *grp = uvm.pggroups;
	for (size_t pggroup = 0; pggroup < npggroups; pggroup++, gs++, grp++) {
		TAILQ_INIT(&gs->gs_activeq);
		TAILQ_INIT(&gs->gs_inactiveq);
		TAILQ_INIT(&gs->gs_radioactiveq);
		grp->pgrp_gs = gs;
		KASSERT(gs->gs_active == 0);
		KASSERT(gs->gs_inactive == 0);
		KASSERT(gs->gs_radioactive == 0);
		KASSERT(grp->pgrp_active == 0);
		KASSERT(grp->pgrp_inactive == 0);
	}
	uvm_pctparam_init(&s->s_inactivepct, CLOCK_INACTIVEPCT, NULL);
	uvm_pctparam_init(&s->s_anonmin, 10, min_check);
	uvm_pctparam_init(&s->s_filemin, 10, min_check);
	uvm_pctparam_init(&s->s_execmin,  5, min_check);
	uvm_pctparam_init(&s->s_anonmax, 80, NULL);
	uvm_pctparam_init(&s->s_filemax, 50, NULL);
	uvm_pctparam_init(&s->s_execmax, 30, NULL);

	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		uvmpdpol_scaninit(grp);
	}
}

void
uvmpdpol_reinit(void)
{
}

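/*
 * uvmpdpol_needsscan_p: true if the group's inactive list is still
 * below its target.
 */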
bool
uvmpdpol_needsscan_p(struct uvm_pggroup *grp)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	return gs->gs_inactive < gs->gs_inactarg;
}

void
uvmpdpol_tune(struct uvm_pggroup *grp)
{

	clock_tune(grp);
}

size_t
uvmpdpol_space(void)
{

	return clock_space();
}

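/*
 * uvmpdpol_recolor: after a change in the number of page colors,
 * migrate every queued page from the old per-group queues into the
 * freshly initialized new ones, then retune each group.
 */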
void
uvmpdpol_recolor(void *new_gs, struct uvm_pggroup *grparray,
	size_t npggroups, size_t old_ncolors)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate * src_gs = s->s_pggroups;
	struct uvmpdpol_groupstate * const gs = new_gs;

	s->s_pggroups = gs;

	for (size_t i = 0; i < npggroups; i++) {
		struct uvmpdpol_groupstate * const dst_gs = &gs[i];
		TAILQ_INIT(&dst_gs->gs_activeq);
		TAILQ_INIT(&dst_gs->gs_inactiveq);
		TAILQ_INIT(&dst_gs->gs_radioactiveq);
		uvm.pggroups[i].pgrp_gs = dst_gs;
	}

	const size_t old_npggroups = VM_NPGGROUP(old_ncolors);
	for (size_t i = 0; i < old_npggroups; i++, src_gs++) {
		struct vm_page *pg;
		KDASSERT(src_gs->gs_inactive == clock_pglist_count(&src_gs->gs_inactiveq));
		while ((pg = TAILQ_FIRST(&src_gs->gs_inactiveq)) != NULL) {
			u_int pggroup = VM_PAGE_TO_PGGROUP(pg, uvmexp.ncolors);
			struct uvmpdpol_groupstate * const xgs = &gs[pggroup];

			/* unlink from the old queue before relinking */
			TAILQ_REMOVE(&src_gs->gs_inactiveq, pg, pageq.queue);
			TAILQ_INSERT_TAIL(&xgs->gs_inactiveq, pg, pageq.queue);
			src_gs->gs_inactive--;
			xgs->gs_inactive++;
			uvm.pggroups[pggroup].pgrp_inactive++;
			KDASSERT(xgs->gs_inactive == clock_pglist_count(&xgs->gs_inactiveq));
		}
		KASSERT(src_gs->gs_inactive == 0);

		KDASSERT(src_gs->gs_active == clock_pglist_count(&src_gs->gs_activeq));
		while ((pg = TAILQ_FIRST(&src_gs->gs_activeq)) != NULL) {
			u_int pggroup = VM_PAGE_TO_PGGROUP(pg, uvmexp.ncolors);
			struct uvmpdpol_groupstate * const xgs = &gs[pggroup];

			TAILQ_REMOVE(&src_gs->gs_activeq, pg, pageq.queue);
			TAILQ_INSERT_TAIL(&xgs->gs_activeq, pg, pageq.queue);
			src_gs->gs_active--;
			xgs->gs_active++;
			KDASSERT(xgs->gs_active == clock_pglist_count(&xgs->gs_activeq));
			uvm.pggroups[pggroup].pgrp_active++;
		}
		KASSERT(src_gs->gs_active == 0);

		KDASSERT(src_gs->gs_radioactive == clock_pglist_count(&src_gs->gs_radioactiveq));
		while ((pg = TAILQ_FIRST(&src_gs->gs_radioactiveq)) != NULL) {
			u_int pggroup = VM_PAGE_TO_PGGROUP(pg, uvmexp.ncolors);
			struct uvmpdpol_groupstate * const xgs = &gs[pggroup];

			TAILQ_REMOVE(&src_gs->gs_radioactiveq, pg, pageq.queue);
			TAILQ_INSERT_TAIL(&xgs->gs_radioactiveq, pg, pageq.queue);
			src_gs->gs_radioactive--;
			xgs->gs_radioactive++;
			KDASSERT(xgs->gs_radioactive == clock_pglist_count(&xgs->gs_radioactiveq));
			uvm.pggroups[pggroup].pgrp_active++;
		}
		KASSERT(src_gs->gs_radioactive == 0);
	}

	struct uvm_pggroup *grp;
	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		clock_tune(grp);
		uvmpdpol_scaninit(grp);
	}
}

#if !defined(PDSIM)

#include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */

void
uvmpdpol_sysctlsetup(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	uvm_pctparam_createsysctlnode(&s->s_anonmin, "anonmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemin, "filemin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmin, "execmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached executable data"));

	uvm_pctparam_createsysctlnode(&s->s_anonmax, "anonmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for "
	    "anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemax, "filemax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmax, "execmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "executable data"));

	uvm_pctparam_createsysctlnode(&s->s_inactivepct, "inactivepct",
	    SYSCTL_DESCR("Size of the inactive queue as a percentage "
	    "of the entire (active + inactive) queue"));
}

#endif /* !defined(PDSIM) */

#if defined(PDSIM)
void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	/* XXX */
#endif /* defined(DEBUG) */
}
#endif /* defined(PDSIM) */