/*	$NetBSD: uvm_pdpolicy_clock.c,v 1.13 2011/02/02 15:25:27 chuck Exp $	*/
/*	NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#if defined(PDSIM)

#include "pdsim.h"

#else /* defined(PDSIM) */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.13 2011/02/02 15:25:27 chuck Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pdpolicy_impl.h>

#endif /* defined(PDSIM) */

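/*
 * Clock page replacement policy.
 *
 * This is UVM's default pdpolicy: a two-queue approximation of LRU in
 * the spirit of the classic BSD/Mach clock algorithm.  Pages believed
 * to be in use live on the active queue; reclaim candidates live on
 * the inactive queue.  Roughly, the pagedaemon drives the policy like
 * this (a simplified sketch of the call sequence, not a verbatim copy
 * of uvm_pdaemon.c):
 *
 *	uvmpdpol_tune();		   recompute the inactive target
 *	uvmpdpol_scaninit();		   snapshot reactivation decisions
 *	while (more memory is needed) {
 *		pg = uvmpdpol_selectvictim();	next reclaim candidate
 *		... clean and free pg, or let it be reactivated ...
 *	}
 *	uvmpdpol_balancequeue(swap_shortage);	refill the inactive queue
 */
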
#define PQ_INACTIVE	PQ_PRIVATE1	/* page is in inactive list */
#define PQ_ACTIVE	PQ_PRIVATE2	/* page is in active list */

#if !defined(CLOCK_INACTIVEPCT)
#define	CLOCK_INACTIVEPCT	33
#endif /* !defined(CLOCK_INACTIVEPCT) */

struct uvmpdpol_globalstate {
	struct pglist s_activeq;	/* allocated pages, in use */
	struct pglist s_inactiveq;	/* pages between the clock hands */
	int s_active;
	int s_inactive;
	int s_inactarg;
	struct uvm_pctparam s_anonmin;
	struct uvm_pctparam s_filemin;
	struct uvm_pctparam s_execmin;
	struct uvm_pctparam s_anonmax;
	struct uvm_pctparam s_filemax;
	struct uvm_pctparam s_execmax;
	struct uvm_pctparam s_inactivepct;
};

struct uvmpdpol_scanstate {
	bool ss_first;
	bool ss_anonreact, ss_filereact, ss_execreact;
	struct vm_page *ss_nextpg;
};

static struct uvmpdpol_globalstate pdpol_state;
static struct uvmpdpol_scanstate pdpol_scanstate;

PDPOL_EVCNT_DEFINE(reactexec)
PDPOL_EVCNT_DEFINE(reactfile)
PDPOL_EVCNT_DEFINE(reactanon)

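/*
 * clock_tune: recompute the inactive queue target.  The target is
 * s_inactivepct percent of the pages the policy knows about
 * (active + inactive), but always at least one page more than the
 * pagedaemon's free target so that a scan can make progress.
 */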
static void
clock_tune(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	s->s_inactarg = UVM_PCTPARAM_APPLY(&s->s_inactivepct,
	    s->s_active + s->s_inactive);
	if (s->s_inactarg <= uvmexp.freetarg) {
		s->s_inactarg = uvmexp.freetarg + 1;
	}
}

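/*
 * uvmpdpol_scaninit: prepare for one pagedaemon scan.  Decide, once
 * per scan, which page types (anon, file, exec) should be reactivated
 * rather than reclaimed in order to respect the configured minimum and
 * maximum usage limits, and reset the scan cursor to the head of the
 * inactive queue.
 */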
void
uvmpdpol_scaninit(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;
	struct uvmpdpol_scanstate *ss = &pdpol_scanstate;
	int t;
	bool anonunder, fileunder, execunder;
	bool anonover, fileover, execover;
	bool anonreact, filereact, execreact;

	/*
	 * decide which types of pages we want to reactivate instead of freeing
	 * to keep usage within the minimum and maximum usage limits.
	 */

	t = s->s_active + s->s_inactive + uvmexp.free;
	anonunder = uvmexp.anonpages <= UVM_PCTPARAM_APPLY(&s->s_anonmin, t);
	fileunder = uvmexp.filepages <= UVM_PCTPARAM_APPLY(&s->s_filemin, t);
	execunder = uvmexp.execpages <= UVM_PCTPARAM_APPLY(&s->s_execmin, t);
	anonover = uvmexp.anonpages > UVM_PCTPARAM_APPLY(&s->s_anonmax, t);
	fileover = uvmexp.filepages > UVM_PCTPARAM_APPLY(&s->s_filemax, t);
	execover = uvmexp.execpages > UVM_PCTPARAM_APPLY(&s->s_execmax, t);
	anonreact = anonunder || (!anonover && (fileover || execover));
	filereact = fileunder || (!fileover && (anonover || execover));
	execreact = execunder || (!execover && (anonover || fileover));
	if (filereact && execreact && (anonreact || uvm_swapisfull())) {
		anonreact = filereact = execreact = false;
	}
	ss->ss_anonreact = anonreact;
	ss->ss_filereact = filereact;
	ss->ss_execreact = execreact;

	ss->ss_first = true;
}

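/*
 * uvmpdpol_selectvictim: return the next inactive page that is a
 * candidate for reclamation, or NULL once the inactive queue has been
 * exhausted.  Pages found to be referenced, and pages of a type the
 * current scan has decided to protect, are moved back to the active
 * queue and skipped.  Called with uvm_pageqlock held.
 */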
struct vm_page *
uvmpdpol_selectvictim(void)
{
	struct uvmpdpol_scanstate *ss = &pdpol_scanstate;
	struct vm_page *pg;

	KASSERT(mutex_owned(&uvm_pageqlock));

	while (/* CONSTCOND */ 1) {
		struct vm_anon *anon;
		struct uvm_object *uobj;

		if (ss->ss_first) {
			pg = TAILQ_FIRST(&pdpol_state.s_inactiveq);
			ss->ss_first = false;
		} else {
			pg = ss->ss_nextpg;
			if (pg != NULL && (pg->pqflags & PQ_INACTIVE) == 0) {
				pg = TAILQ_FIRST(&pdpol_state.s_inactiveq);
			}
		}
		if (pg == NULL) {
			break;
		}
		ss->ss_nextpg = TAILQ_NEXT(pg, pageq.queue);

		uvmexp.pdscans++;

		/*
		 * move referenced pages back to active queue and
		 * skip to next page.
		 */

		if (pmap_is_referenced(pg)) {
			uvmpdpol_pageactivate(pg);
			uvmexp.pdreact++;
			continue;
		}

		anon = pg->uanon;
		uobj = pg->uobject;

		/*
		 * enforce the minimum thresholds on different
		 * types of memory usage.  if reusing the current
		 * page would reduce that type of usage below its
		 * minimum, reactivate the page instead and move
		 * on to the next page.
		 */

		if (uobj && UVM_OBJ_IS_VTEXT(uobj) && ss->ss_execreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactexec);
			continue;
		}
		if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
		    !UVM_OBJ_IS_VTEXT(uobj) && ss->ss_filereact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactfile);
			continue;
		}
		if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && ss->ss_anonreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactanon);
			continue;
		}

		break;
	}

	return pg;
}

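/*
 * uvmpdpol_balancequeue: second half of a scan.  Walk the active
 * queue, dropping swap slots from swap-backed pages while
 * swap_shortage remains, and deactivating pages until the inactive
 * queue reaches its target length.
 */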
void
uvmpdpol_balancequeue(int swap_shortage)
{
	int inactive_shortage;
	struct vm_page *p, *nextpg;

	/*
	 * we have done the scan to get free pages.   now we work on meeting
	 * our inactive target.
	 */

	inactive_shortage = pdpol_state.s_inactarg - pdpol_state.s_inactive;
	for (p = TAILQ_FIRST(&pdpol_state.s_activeq);
	     p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq.queue);

		/*
		 * if there's a shortage of swap slots, try to free
		 * this page's swap slot.
		 */

		if (swap_shortage > 0 && (p->pqflags & PQ_SWAPBACKED) != 0) {
			if (uvmpd_trydropswap(p)) {
				swap_shortage--;
			}
		}

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */

		if (inactive_shortage > 0) {
			/* no need to check wire_count as p is "active" */
			uvmpdpol_pagedeactivate(p);
			uvmexp.pddeact++;
			inactive_shortage--;
		}
	}
}

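/*
 * uvmpdpol_pagedeactivate: move a page to the tail of the inactive
 * queue, clearing its pmap reference bit so that a later reference
 * can be detected.  uvmpdpol_pageactivate() and uvmpdpol_pagedequeue()
 * below are the corresponding transitions onto the active queue and
 * off both queues.
 */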
void
uvmpdpol_pagedeactivate(struct vm_page *pg)
{

	KASSERT(mutex_owned(&uvm_pageqlock));
	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(pdpol_state.s_active > 0);
		pdpol_state.s_active--;
	}
	if ((pg->pqflags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		pmap_clear_reference(pg);
		TAILQ_INSERT_TAIL(&pdpol_state.s_inactiveq, pg, pageq.queue);
		pg->pqflags |= PQ_INACTIVE;
		pdpol_state.s_inactive++;
	}
}

void
uvmpdpol_pageactivate(struct vm_page *pg)
{

	uvmpdpol_pagedequeue(pg);
	TAILQ_INSERT_TAIL(&pdpol_state.s_activeq, pg, pageq.queue);
	pg->pqflags |= PQ_ACTIVE;
	pdpol_state.s_active++;
}

void
uvmpdpol_pagedequeue(struct vm_page *pg)
{

	if (pg->pqflags & PQ_ACTIVE) {
		KASSERT(mutex_owned(&uvm_pageqlock));
		TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(pdpol_state.s_active > 0);
		pdpol_state.s_active--;
	} else if (pg->pqflags & PQ_INACTIVE) {
		KASSERT(mutex_owned(&uvm_pageqlock));
		TAILQ_REMOVE(&pdpol_state.s_inactiveq, pg, pageq.queue);
		pg->pqflags &= ~PQ_INACTIVE;
		KASSERT(pdpol_state.s_inactive > 0);
		pdpol_state.s_inactive--;
	}
}

void
uvmpdpol_pageenqueue(struct vm_page *pg)
{

	uvmpdpol_pageactivate(pg);
}

void
uvmpdpol_anfree(struct vm_anon *an)
{
}

bool
uvmpdpol_pageisqueued_p(struct vm_page *pg)
{

	return (pg->pqflags & (PQ_ACTIVE | PQ_INACTIVE)) != 0;
}

void
uvmpdpol_estimatepageable(int *active, int *inactive)
{

	if (active) {
		*active = pdpol_state.s_active;
	}
	if (inactive) {
		*inactive = pdpol_state.s_inactive;
	}
}

#if !defined(PDSIM)
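/*
 * min_check: sysctl bounds check for the anonmin/filemin/execmin
 * knobs.  Reject a new value if the three minimum percentages would
 * total more than 95%, presumably so that some memory always remains
 * eligible for reclamation.
 */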
static int
min_check(struct uvm_pctparam *pct, int t)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;
	int total = t;

	if (pct != &s->s_anonmin) {
		total += uvm_pctparam_get(&s->s_anonmin);
	}
	if (pct != &s->s_filemin) {
		total += uvm_pctparam_get(&s->s_filemin);
	}
	if (pct != &s->s_execmin) {
		total += uvm_pctparam_get(&s->s_execmin);
	}
	if (total > 95) {
		return EINVAL;
	}
	return 0;
}
#endif /* !defined(PDSIM) */

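/*
 * uvmpdpol_init: set up the queues and the default tunables
 * (inactivepct CLOCK_INACTIVEPCT, anonmin/filemin/execmin 10/10/5%,
 * anonmax/filemax/execmax 80/50/30%).
 */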
void
uvmpdpol_init(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	TAILQ_INIT(&s->s_activeq);
	TAILQ_INIT(&s->s_inactiveq);
	uvm_pctparam_init(&s->s_inactivepct, CLOCK_INACTIVEPCT, NULL);
	uvm_pctparam_init(&s->s_anonmin, 10, min_check);
	uvm_pctparam_init(&s->s_filemin, 10, min_check);
	uvm_pctparam_init(&s->s_execmin,  5, min_check);
	uvm_pctparam_init(&s->s_anonmax, 80, NULL);
	uvm_pctparam_init(&s->s_filemax, 50, NULL);
	uvm_pctparam_init(&s->s_execmax, 30, NULL);
}

void
uvmpdpol_reinit(void)
{
}

bool
uvmpdpol_needsscan_p(void)
{

	return pdpol_state.s_inactive < pdpol_state.s_inactarg;
}

void
uvmpdpol_tune(void)
{

	clock_tune();
}

#if !defined(PDSIM)

#include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */

void
uvmpdpol_sysctlsetup(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	uvm_pctparam_createsysctlnode(&s->s_anonmin, "anonmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemin, "filemin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmin, "execmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached executable data"));

	uvm_pctparam_createsysctlnode(&s->s_anonmax, "anonmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for "
	    "anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemax, "filemax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmax, "execmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "executable data"));

	uvm_pctparam_createsysctlnode(&s->s_inactivepct, "inactivepct",
	    SYSCTL_DESCR("Percentage of inactive queue of "
	    "the entire (active + inactive) queue"));
}

#endif /* !defined(PDSIM) */

#if defined(PDSIM)
void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	/* XXX */
#endif /* defined(DEBUG) */
}
#endif /* defined(PDSIM) */