/*	$NetBSD: uvm_pdpolicy_clock.c,v 1.12.20.2 2011/03/05 20:56:38 rmind Exp $	*/
/*	NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#if defined(PDSIM)

#include "pdsim.h"

#else /* defined(PDSIM) */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.12.20.2 2011/03/05 20:56:38 rmind Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pdpolicy_impl.h>

#endif /* defined(PDSIM) */

#define PQ_INACTIVE	PQ_PRIVATE1	/* page is in inactive list */
#define PQ_ACTIVE	PQ_PRIVATE2	/* page is in active list */

#if !defined(CLOCK_INACTIVEPCT)
#define	CLOCK_INACTIVEPCT	33
#endif /* !defined(CLOCK_INACTIVEPCT) */

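/*
 * global state for the clock policy: the active and inactive page queues,
 * their current lengths, the computed inactive-queue target, and the
 * tunable percentage parameters that drive reactivation decisions.
 */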
struct uvmpdpol_globalstate {
	struct pglist s_activeq;	/* allocated pages, in use */
	struct pglist s_inactiveq;	/* pages between the clock hands */
	int s_active;
	int s_inactive;
	int s_inactarg;
	struct uvm_pctparam s_anonmin;
	struct uvm_pctparam s_filemin;
	struct uvm_pctparam s_execmin;
	struct uvm_pctparam s_anonmax;
	struct uvm_pctparam s_filemax;
	struct uvm_pctparam s_execmax;
	struct uvm_pctparam s_inactivepct;
};

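/*
 * per-scan state, set up by uvmpdpol_scaninit() and consumed by
 * uvmpdpol_selectvictim(): which page types to reactivate rather than
 * reclaim, and where to resume in the inactive queue.
 */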
struct uvmpdpol_scanstate {
	bool ss_first;
	bool ss_anonreact, ss_filereact, ss_execreact;
	struct vm_page *ss_nextpg;
};

static struct uvmpdpol_globalstate pdpol_state;
static struct uvmpdpol_scanstate pdpol_scanstate;

PDPOL_EVCNT_DEFINE(reactexec)
PDPOL_EVCNT_DEFINE(reactfile)
PDPOL_EVCNT_DEFINE(reactanon)

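/*
 * clock_tune: recompute the inactive queue target as s_inactivepct
 * (CLOCK_INACTIVEPCT by default) of the active + inactive total, and
 * keep it above the free page target (uvmexp.freetarg).
 */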
static void
clock_tune(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	s->s_inactarg = UVM_PCTPARAM_APPLY(&s->s_inactivepct,
	    s->s_active + s->s_inactive);
	if (s->s_inactarg <= uvmexp.freetarg) {
		s->s_inactarg = uvmexp.freetarg + 1;
	}
}

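/*
 * uvmpdpol_scaninit: prepare for a pagedaemon scan.  compare the current
 * anon/file/exec page usage against the configured minimum and maximum
 * percentages and record which page types should be reactivated rather
 * than reclaimed.  if file and exec pages would both be protected and
 * anon pages either would be too or cannot be paged out (swap is full),
 * protect nothing so the scan can still reclaim pages.
 */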
void
uvmpdpol_scaninit(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;
	struct uvmpdpol_scanstate *ss = &pdpol_scanstate;
	int t;
	bool anonunder, fileunder, execunder;
	bool anonover, fileover, execover;
	bool anonreact, filereact, execreact;

	/*
	 * decide which types of pages we want to reactivate instead of freeing
	 * to keep usage within the minimum and maximum usage limits.
	 */

	t = s->s_active + s->s_inactive + uvmexp.free;
	anonunder = uvmexp.anonpages <= UVM_PCTPARAM_APPLY(&s->s_anonmin, t);
	fileunder = uvmexp.filepages <= UVM_PCTPARAM_APPLY(&s->s_filemin, t);
	execunder = uvmexp.execpages <= UVM_PCTPARAM_APPLY(&s->s_execmin, t);
	anonover = uvmexp.anonpages > UVM_PCTPARAM_APPLY(&s->s_anonmax, t);
	fileover = uvmexp.filepages > UVM_PCTPARAM_APPLY(&s->s_filemax, t);
	execover = uvmexp.execpages > UVM_PCTPARAM_APPLY(&s->s_execmax, t);
	anonreact = anonunder || (!anonover && (fileover || execover));
	filereact = fileunder || (!fileover && (anonover || execover));
	execreact = execunder || (!execover && (anonover || fileover));
	if (filereact && execreact && (anonreact || uvm_swapisfull())) {
		anonreact = filereact = execreact = false;
	}
	ss->ss_anonreact = anonreact;
	ss->ss_filereact = filereact;
	ss->ss_execreact = execreact;

	ss->ss_first = true;
}

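/*
 * uvmpdpol_selectvictim: return the next inactive page to consider for
 * reclaim, or NULL when the inactive queue is exhausted.  pages found to
 * be referenced, and pages of a type currently being protected (see
 * uvmpdpol_scaninit), are moved back to the active queue and skipped.
 * called with uvm_pageqlock held.
 */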
struct vm_page *
uvmpdpol_selectvictim(void)
{
	struct uvmpdpol_scanstate *ss = &pdpol_scanstate;
	struct vm_page *pg;
	kmutex_t *lock;

	KASSERT(mutex_owned(&uvm_pageqlock));

	while (/* CONSTCOND */ 1) {
		struct vm_anon *anon;
		struct uvm_object *uobj;

		if (ss->ss_first) {
			pg = TAILQ_FIRST(&pdpol_state.s_inactiveq);
			ss->ss_first = false;
		} else {
			pg = ss->ss_nextpg;
			if (pg != NULL && (pg->pqflags & PQ_INACTIVE) == 0) {
				pg = TAILQ_FIRST(&pdpol_state.s_inactiveq);
			}
		}
		if (pg == NULL) {
			break;
		}
		ss->ss_nextpg = TAILQ_NEXT(pg, pageq.queue);

		uvmexp.pdscans++;

		/*
		 * move referenced pages back to active queue and
		 * skip to next page.
		 */

		lock = uvmpd_trylockowner(pg);
		if (lock != NULL) {
			if (pmap_is_referenced(pg)) {
				uvmpdpol_pageactivate(pg);
				uvmexp.pdreact++;
				mutex_exit(lock);
				continue;
			}
			mutex_exit(lock);
		}

		anon = pg->uanon;
		uobj = pg->uobject;

		/*
		 * enforce the minimum thresholds on different
		 * types of memory usage.  if reusing the current
		 * page would reduce that type of usage below its
		 * minimum, reactivate the page instead and move
		 * on to the next page.
		 */

		if (uobj && UVM_OBJ_IS_VTEXT(uobj) && ss->ss_execreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactexec);
			continue;
		}
		if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
		    !UVM_OBJ_IS_VTEXT(uobj) && ss->ss_filereact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactfile);
			continue;
		}
		if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && ss->ss_anonreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactanon);
			continue;
		}

		break;
	}

	return pg;
}

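/*
 * uvmpdpol_balancequeue: after the reclaim scan, walk the active queue
 * to drop swap slots from swap-backed pages while a swap shortage
 * remains, and deactivate pages until the inactive target is met.
 */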
void
uvmpdpol_balancequeue(int swap_shortage)
{
	int inactive_shortage;
	struct vm_page *p, *nextpg;
	kmutex_t *lock;

	/*
	 * we have done the scan to get free pages.  now we work on meeting
	 * our inactive target.
	 */

	inactive_shortage = pdpol_state.s_inactarg - pdpol_state.s_inactive;
	for (p = TAILQ_FIRST(&pdpol_state.s_activeq);
	     p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq.queue);

		/*
		 * if there's a shortage of swap slots, try to free the
		 * swap slot held by this page.
		 */

		if (swap_shortage > 0 && (p->pqflags & PQ_SWAPBACKED) != 0) {
			if (uvmpd_trydropswap(p)) {
				swap_shortage--;
			}
		}

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */

		if (inactive_shortage <= 0) {
			continue;
		}

		/* no need to check wire_count as p is "active" */
		lock = uvmpd_trylockowner(p);
		if (lock != NULL) {
			uvmpdpol_pagedeactivate(p);
			uvmexp.pddeact++;
			inactive_shortage--;
			mutex_exit(lock);
		}
	}
}

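/*
 * uvmpdpol_pagedeactivate: move a page to the tail of the inactive queue
 * if it isn't already there, clearing its pmap reference bit on the way.
 * the page's owner and uvm_pageqlock must be held.
 */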
void
uvmpdpol_pagedeactivate(struct vm_page *pg)
{

	KASSERT(uvm_page_locked_p(pg));
	KASSERT(mutex_owned(&uvm_pageqlock));

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(pdpol_state.s_active > 0);
		pdpol_state.s_active--;
	}
	if ((pg->pqflags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		pmap_clear_reference(pg);
		TAILQ_INSERT_TAIL(&pdpol_state.s_inactiveq, pg, pageq.queue);
		pg->pqflags |= PQ_INACTIVE;
		pdpol_state.s_inactive++;
	}
}

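/*
 * uvmpdpol_pageactivate: remove the page from whichever queue it is on
 * and put it at the tail of the active queue.
 */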
void
uvmpdpol_pageactivate(struct vm_page *pg)
{

	uvmpdpol_pagedequeue(pg);
	TAILQ_INSERT_TAIL(&pdpol_state.s_activeq, pg, pageq.queue);
	pg->pqflags |= PQ_ACTIVE;
	pdpol_state.s_active++;
}

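/*
 * uvmpdpol_pagedequeue: remove the page from the active or inactive
 * queue, if it is on one, and adjust the queue counters accordingly.
 */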
void
uvmpdpol_pagedequeue(struct vm_page *pg)
{

	if (pg->pqflags & PQ_ACTIVE) {
		KASSERT(mutex_owned(&uvm_pageqlock));
		TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(pdpol_state.s_active > 0);
		pdpol_state.s_active--;
	} else if (pg->pqflags & PQ_INACTIVE) {
		KASSERT(mutex_owned(&uvm_pageqlock));
		TAILQ_REMOVE(&pdpol_state.s_inactiveq, pg, pageq.queue);
		pg->pqflags &= ~PQ_INACTIVE;
		KASSERT(pdpol_state.s_inactive > 0);
		pdpol_state.s_inactive--;
	}
}

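/*
 * uvmpdpol_pageenqueue: under this policy a newly enqueued page simply
 * starts out on the active queue.
 */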
void
uvmpdpol_pageenqueue(struct vm_page *pg)
{

	uvmpdpol_pageactivate(pg);
}

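/*
 * uvmpdpol_anfree: hook invoked when an anon is freed.  the clock policy
 * keeps no per-anon state, so there is nothing to do here.
 */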
void
uvmpdpol_anfree(struct vm_anon *an)
{
}

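/*
 * uvmpdpol_pageisqueued_p: return true if the page is currently on the
 * active or inactive queue.
 */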
bool
uvmpdpol_pageisqueued_p(struct vm_page *pg)
{

	return (pg->pqflags & (PQ_ACTIVE | PQ_INACTIVE)) != 0;
}

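/*
 * uvmpdpol_estimatepageable: report the current lengths of the active
 * and inactive queues.  either pointer may be NULL.
 */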
void
uvmpdpol_estimatepageable(int *active, int *inactive)
{

	if (active) {
		*active = pdpol_state.s_active;
	}
	if (inactive) {
		*inactive = pdpol_state.s_inactive;
	}
}

#if !defined(PDSIM)
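/*
 * min_check: validation hook for the *min percentage parameters.  reject
 * a new value if the three reserved minimum percentages would add up to
 * more than 95.
 */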
static int
min_check(struct uvm_pctparam *pct, int t)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;
	int total = t;

	if (pct != &s->s_anonmin) {
		total += uvm_pctparam_get(&s->s_anonmin);
	}
	if (pct != &s->s_filemin) {
		total += uvm_pctparam_get(&s->s_filemin);
	}
	if (pct != &s->s_execmin) {
		total += uvm_pctparam_get(&s->s_execmin);
	}
	if (total > 95) {
		return EINVAL;
	}
	return 0;
}
#endif /* !defined(PDSIM) */

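/*
 * uvmpdpol_init: initialize the queues and the tunables with their
 * defaults: anonmin/filemin/execmin = 10/10/5, anonmax/filemax/execmax =
 * 80/50/30, inactivepct = CLOCK_INACTIVEPCT.
 */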
void
uvmpdpol_init(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	TAILQ_INIT(&s->s_activeq);
	TAILQ_INIT(&s->s_inactiveq);
	uvm_pctparam_init(&s->s_inactivepct, CLOCK_INACTIVEPCT, NULL);
	uvm_pctparam_init(&s->s_anonmin, 10, min_check);
	uvm_pctparam_init(&s->s_filemin, 10, min_check);
	uvm_pctparam_init(&s->s_execmin,  5, min_check);
	uvm_pctparam_init(&s->s_anonmax, 80, NULL);
	uvm_pctparam_init(&s->s_filemax, 50, NULL);
	uvm_pctparam_init(&s->s_execmax, 30, NULL);
}

void
uvmpdpol_reinit(void)
{
}

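/*
 * uvmpdpol_needsscan_p: return true if the inactive queue has fallen
 * below its target, i.e. the pagedaemon should scan.
 */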
bool
uvmpdpol_needsscan_p(void)
{

	return pdpol_state.s_inactive < pdpol_state.s_inactarg;
}

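/*
 * uvmpdpol_tune: refresh the inactive queue target; see clock_tune().
 */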
void
uvmpdpol_tune(void)
{

	clock_tune();
}

#if !defined(PDSIM)

#include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */

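/*
 * uvmpdpol_sysctlsetup: export the tunable percentages (anonmin, filemin,
 * execmin, anonmax, filemax, execmax, inactivepct) as sysctl nodes.
 */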
void
uvmpdpol_sysctlsetup(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	uvm_pctparam_createsysctlnode(&s->s_anonmin, "anonmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemin, "filemin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmin, "execmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached executable data"));

	uvm_pctparam_createsysctlnode(&s->s_anonmax, "anonmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for "
	    "anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemax, "filemax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmax, "execmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "executable data"));

	uvm_pctparam_createsysctlnode(&s->s_inactivepct, "inactivepct",
	    SYSCTL_DESCR("Percentage of inactive queue of "
	    "the entire (active + inactive) queue"));
}

#endif /* !defined(PDSIM) */

#if defined(PDSIM)
void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	/* XXX */
#endif /* defined(DEBUG) */
}
#endif /* defined(PDSIM) */