/*	$NetBSD: uvm_pdpolicy_clock.c,v 1.27 2019/12/31 13:07:14 ad Exp $	*/
/*	NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#if defined(PDSIM)

#include "pdsim.h"

#else /* defined(PDSIM) */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.27 2019/12/31 13:07:14 ad Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pdpolicy_impl.h>
#include <uvm/uvm_stat.h>

#endif /* defined(PDSIM) */

#define	PQ_TIME		0xfffffffc	/* time of last activation */
#define	PQ_INACTIVE	0x00000001	/* page is in inactive list */
#define	PQ_ACTIVE	0x00000002	/* page is in active list */

#if !defined(CLOCK_INACTIVEPCT)
#define	CLOCK_INACTIVEPCT	33
#endif /* !defined(CLOCK_INACTIVEPCT) */

struct uvmpdpol_globalstate {
	kmutex_t lock;			/* lock on state */
					/* <= compiler pads here */
	struct pglist s_activeq		/* allocated pages, in use */
	    __aligned(COHERENCY_UNIT);
	struct pglist s_inactiveq;	/* pages between the clock hands */
	int s_active;
	int s_inactive;
	int s_inactarg;
	struct uvm_pctparam s_anonmin;
	struct uvm_pctparam s_filemin;
	struct uvm_pctparam s_execmin;
	struct uvm_pctparam s_anonmax;
	struct uvm_pctparam s_filemax;
	struct uvm_pctparam s_execmax;
	struct uvm_pctparam s_inactivepct;
};

struct uvmpdpol_scanstate {
	bool ss_anonreact, ss_filereact, ss_execreact;
	struct vm_page ss_marker;
};

static void	uvmpdpol_pageactivate_locked(struct vm_page *);
static void	uvmpdpol_pagedeactivate_locked(struct vm_page *);
static void	uvmpdpol_pagedequeue_locked(struct vm_page *);

static struct uvmpdpol_globalstate pdpol_state __cacheline_aligned;
static struct uvmpdpol_scanstate pdpol_scanstate;

PDPOL_EVCNT_DEFINE(reactexec)
PDPOL_EVCNT_DEFINE(reactfile)
PDPOL_EVCNT_DEFINE(reactanon)

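/*
 * clock_tune: recompute the inactive list target as a percentage of all
 * pages known to the policy (active + inactive), and keep it above the
 * free page target so the pagedaemon always has candidates to scan.
 */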
static void
clock_tune(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	s->s_inactarg = UVM_PCTPARAM_APPLY(&s->s_inactivepct,
	    s->s_active + s->s_inactive);
	if (s->s_inactarg <= uvmexp.freetarg) {
		s->s_inactarg = uvmexp.freetarg + 1;
	}
}

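/*
 * uvmpdpol_scaninit: prepare for a pagedaemon scan.  decide which page
 * types (anon, file, executable) should be reactivated rather than freed,
 * based on the per-type minimum/maximum usage limits, and place a marker
 * page at the head of the inactive queue for the scan to walk from.
 */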
void
uvmpdpol_scaninit(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;
	struct uvmpdpol_scanstate *ss = &pdpol_scanstate;
	int t;
	bool anonunder, fileunder, execunder;
	bool anonover, fileover, execover;
	bool anonreact, filereact, execreact;
	int64_t freepg, anonpg, filepg, execpg;

	/*
	 * decide which types of pages we want to reactivate instead of freeing
	 * to keep usage within the minimum and maximum usage limits.
	 */

	cpu_count_sync_all();
	freepg = uvm_availmem();
	anonpg = cpu_count_get(CPU_COUNT_ANONPAGES);
	filepg = cpu_count_get(CPU_COUNT_FILEPAGES);
	execpg = cpu_count_get(CPU_COUNT_EXECPAGES);

	mutex_enter(&s->lock);
	t = s->s_active + s->s_inactive + freepg;
	anonunder = anonpg <= UVM_PCTPARAM_APPLY(&s->s_anonmin, t);
	fileunder = filepg <= UVM_PCTPARAM_APPLY(&s->s_filemin, t);
	execunder = execpg <= UVM_PCTPARAM_APPLY(&s->s_execmin, t);
	anonover = anonpg > UVM_PCTPARAM_APPLY(&s->s_anonmax, t);
	fileover = filepg > UVM_PCTPARAM_APPLY(&s->s_filemax, t);
	execover = execpg > UVM_PCTPARAM_APPLY(&s->s_execmax, t);
	anonreact = anonunder || (!anonover && (fileover || execover));
	filereact = fileunder || (!fileover && (anonover || execover));
	execreact = execunder || (!execover && (anonover || fileover));
	if (filereact && execreact && (anonreact || uvm_swapisfull())) {
		anonreact = filereact = execreact = false;
	}
	ss->ss_anonreact = anonreact;
	ss->ss_filereact = filereact;
	ss->ss_execreact = execreact;
	memset(&ss->ss_marker, 0, sizeof(ss->ss_marker));
	ss->ss_marker.flags = PG_MARKER;
	TAILQ_INSERT_HEAD(&pdpol_state.s_inactiveq, &ss->ss_marker, pdqueue);
	mutex_exit(&s->lock);
}

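/*
 * uvmpdpol_scanfini: end of a pagedaemon scan; remove the scan marker
 * from the inactive queue.
 */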
void
uvmpdpol_scanfini(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;
	struct uvmpdpol_scanstate *ss = &pdpol_scanstate;

	mutex_enter(&s->lock);
	TAILQ_REMOVE(&pdpol_state.s_inactiveq, &ss->ss_marker, pdqueue);
	mutex_exit(&s->lock);
}

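/*
 * uvmpdpol_selectvictim: return the next inactive page that is a candidate
 * for reclaim, with its owner (object or anon) locked.  on success *plock
 * holds the owner's lock, which the caller must release.  returns NULL
 * when the inactive queue has been exhausted.
 */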
struct vm_page *
uvmpdpol_selectvictim(kmutex_t **plock)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;
	struct uvmpdpol_scanstate *ss = &pdpol_scanstate;
	struct vm_page *pg;
	kmutex_t *lock;

	mutex_enter(&s->lock);
	while (/* CONSTCOND */ 1) {
		struct vm_anon *anon;
		struct uvm_object *uobj;

		pg = TAILQ_NEXT(&ss->ss_marker, pdqueue);
		if (pg == NULL) {
			break;
		}
		KASSERT((pg->flags & PG_MARKER) == 0);
		uvmexp.pdscans++;

		/*
		 * acquire interlock to stabilize page identity.
		 * if we have caught the page in a state of flux
		 * and it should be dequeued, do it now and then
		 * move on to the next.
		 */
		mutex_enter(&pg->interlock);
		if ((pg->uobject == NULL && pg->uanon == NULL) ||
		    pg->wire_count > 0) {
			mutex_exit(&pg->interlock);
			uvmpdpol_pagedequeue_locked(pg);
			continue;
		}

		/*
		 * now prepare to move on to the next page.
		 */
		TAILQ_REMOVE(&pdpol_state.s_inactiveq, &ss->ss_marker,
		    pdqueue);
		TAILQ_INSERT_AFTER(&pdpol_state.s_inactiveq, pg,
		    &ss->ss_marker, pdqueue);

		/*
		 * enforce the minimum thresholds on different
		 * types of memory usage.  if reusing the current
		 * page would reduce that type of usage below its
		 * minimum, reactivate the page instead and move
		 * on to the next page.
		 */
		anon = pg->uanon;
		uobj = pg->uobject;
		if (uobj && UVM_OBJ_IS_VTEXT(uobj) && ss->ss_execreact) {
			mutex_exit(&pg->interlock);
			uvmpdpol_pageactivate_locked(pg);
			PDPOL_EVCNT_INCR(reactexec);
			continue;
		}
		if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
		    !UVM_OBJ_IS_VTEXT(uobj) && ss->ss_filereact) {
			mutex_exit(&pg->interlock);
			uvmpdpol_pageactivate_locked(pg);
			PDPOL_EVCNT_INCR(reactfile);
			continue;
		}
		if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && ss->ss_anonreact) {
			mutex_exit(&pg->interlock);
			uvmpdpol_pageactivate_locked(pg);
			PDPOL_EVCNT_INCR(reactanon);
			continue;
		}

		/*
		 * try to lock the object that owns the page.
		 *
		 * with the page interlock held, we can drop s->lock, which
		 * could otherwise serve as a barrier to us getting the
		 * object locked, because the owner of the object's lock may
		 * be blocked on s->lock (i.e. a deadlock).
		 *
		 * whatever happens, uvmpd_trylockowner() will release the
		 * interlock.  with the interlock dropped we can then
		 * re-acquire our own lock.  the order is:
		 *
		 *	object -> pdpol -> interlock.
		 */
		mutex_exit(&s->lock);
		lock = uvmpd_trylockowner(pg);
		/* pg->interlock now released */
		mutex_enter(&s->lock);
		if (lock == NULL) {
			/* didn't get it - try the next page. */
			continue;
		}

		/*
		 * move referenced pages back to active queue and skip to
		 * next page.
		 */
		if (pmap_is_referenced(pg)) {
			uvmpdpol_pageactivate_locked(pg);
			uvmexp.pdreact++;
			mutex_exit(lock);
			continue;
		}

		/* we have a potential victim. */
		*plock = lock;
		break;
	}
	mutex_exit(&s->lock);
	return pg;
}

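/*
 * uvmpdpol_balancequeue: deactivate pages from the active queue until the
 * inactive target is met and, if there is a swap shortage, try to free
 * swap slots held by swap-backed pages along the way.
 */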
void
uvmpdpol_balancequeue(int swap_shortage)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;
	int inactive_shortage;
	struct vm_page *p, marker;
	kmutex_t *lock;

	/*
	 * we have done the scan to get free pages.   now we work on meeting
	 * our inactive target.
	 */

	memset(&marker, 0, sizeof(marker));
	marker.flags = PG_MARKER;

	mutex_enter(&s->lock);
	TAILQ_INSERT_HEAD(&pdpol_state.s_activeq, &marker, pdqueue);
	for (;;) {
		inactive_shortage =
		    pdpol_state.s_inactarg - pdpol_state.s_inactive;
		if (inactive_shortage <= 0 && swap_shortage <= 0) {
			break;
		}
		p = TAILQ_NEXT(&marker, pdqueue);
		if (p == NULL) {
			break;
		}
		KASSERT((p->flags & PG_MARKER) == 0);

		/*
		 * acquire interlock to stabilize page identity.
		 * if we have caught the page in a state of flux
		 * and it should be dequeued, do it now and then
		 * move on to the next.
		 */
		mutex_enter(&p->interlock);
		if ((p->uobject == NULL && p->uanon == NULL) ||
		    p->wire_count > 0) {
			mutex_exit(&p->interlock);
			uvmpdpol_pagedequeue_locked(p);
			continue;
		}

		/*
		 * now prepare to move on to the next page.
		 */
		TAILQ_REMOVE(&pdpol_state.s_activeq, &marker, pdqueue);
		TAILQ_INSERT_AFTER(&pdpol_state.s_activeq, p, &marker,
		    pdqueue);

		/*
		 * try to lock the object that owns the page.  see comments
		 * in uvmpdpol_selectvictim().
		 */
		mutex_exit(&s->lock);
		lock = uvmpd_trylockowner(p);
		/* p->interlock now released */
		mutex_enter(&s->lock);
		if (lock == NULL) {
			/* didn't get it - try the next page. */
			continue;
		}

		/*
		 * if there's a shortage of swap slots, try to free this
		 * page's swap slot.
		 */
		if (swap_shortage > 0 && (p->flags & PG_SWAPBACKED) != 0 &&
		    (p->flags & PG_BUSY) == 0) {
			if (uvmpd_dropswap(p)) {
				swap_shortage--;
			}
		}

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */
		if (inactive_shortage > 0) {
			uvmpdpol_pagedeactivate_locked(p);
			uvmexp.pddeact++;
			inactive_shortage--;
		}
		mutex_exit(lock);
	}
	TAILQ_REMOVE(&pdpol_state.s_activeq, &marker, pdqueue);
	mutex_exit(&s->lock);
}

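/*
 * uvmpdpol_pagedeactivate_locked: move a page from the active queue (if it
 * is on it) to the tail of the inactive queue, clearing its pmap reference
 * bit.  called with the page owner's lock and the global policy lock held.
 */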
static void
uvmpdpol_pagedeactivate_locked(struct vm_page *pg)
{

	KASSERT(uvm_page_owner_locked_p(pg));

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pdqueue);
		pg->pqflags &= ~(PQ_ACTIVE | PQ_TIME);
		KASSERT(pdpol_state.s_active > 0);
		pdpol_state.s_active--;
	}
	if ((pg->pqflags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		pmap_clear_reference(pg);
		TAILQ_INSERT_TAIL(&pdpol_state.s_inactiveq, pg, pdqueue);
		pg->pqflags |= PQ_INACTIVE;
		pdpol_state.s_inactive++;
	}
}

void
uvmpdpol_pagedeactivate(struct vm_page *pg)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	mutex_enter(&s->lock);
	uvmpdpol_pagedeactivate_locked(pg);
	mutex_exit(&s->lock);
}

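/*
 * uvmpdpol_pageactivate_locked: move a page to the tail of the active
 * queue, recording the current hardclock_ticks in the PQ_TIME bits of
 * pqflags.  called with the global policy lock held.
 */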
static void
uvmpdpol_pageactivate_locked(struct vm_page *pg)
{

	uvmpdpol_pagedequeue_locked(pg);
	TAILQ_INSERT_TAIL(&pdpol_state.s_activeq, pg, pdqueue);
	pg->pqflags = PQ_ACTIVE | (hardclock_ticks & PQ_TIME);
	pdpol_state.s_active++;
}

void
uvmpdpol_pageactivate(struct vm_page *pg)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	/* Safety: PQ_ACTIVE clear also tells us if it is not enqueued. */
	if ((pg->pqflags & PQ_ACTIVE) == 0 ||
	    ((hardclock_ticks & PQ_TIME) - (pg->pqflags & PQ_TIME)) >= hz) {
		mutex_enter(&s->lock);
		uvmpdpol_pageactivate_locked(pg);
		mutex_exit(&s->lock);
	}
}

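/*
 * uvmpdpol_pagedequeue_locked: remove a page from whichever paging queue
 * (active or inactive) it is currently on.  called with the global policy
 * lock held.
 */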
static void
uvmpdpol_pagedequeue_locked(struct vm_page *pg)
{

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&pdpol_state.s_activeq, pg, pdqueue);
		pg->pqflags &= ~(PQ_ACTIVE | PQ_TIME);
		KASSERT(pdpol_state.s_active > 0);
		pdpol_state.s_active--;
	} else if (pg->pqflags & PQ_INACTIVE) {
		TAILQ_REMOVE(&pdpol_state.s_inactiveq, pg, pdqueue);
		pg->pqflags &= ~PQ_INACTIVE;
		KASSERT(pdpol_state.s_inactive > 0);
		pdpol_state.s_inactive--;
	}
}

void
uvmpdpol_pagedequeue(struct vm_page *pg)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	mutex_enter(&s->lock);
	uvmpdpol_pagedequeue_locked(pg);
	mutex_exit(&s->lock);
}

void
uvmpdpol_pageenqueue(struct vm_page *pg)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	mutex_enter(&s->lock);
	uvmpdpol_pageactivate_locked(pg);
	mutex_exit(&s->lock);
}

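/*
 * uvmpdpol_anfree: an anon is being freed; nothing to do for this policy.
 */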
void
uvmpdpol_anfree(struct vm_anon *an)
{
}

bool
uvmpdpol_pageisqueued_p(struct vm_page *pg)
{

	/* Safe to test unlocked due to page life-cycle. */
	return (pg->pqflags & (PQ_ACTIVE | PQ_INACTIVE)) != 0;
}

void
uvmpdpol_estimatepageable(int *active, int *inactive)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	mutex_enter(&s->lock);
	if (active) {
		*active = pdpol_state.s_active;
	}
	if (inactive) {
		*inactive = pdpol_state.s_inactive;
	}
	mutex_exit(&s->lock);
}

#if !defined(PDSIM)
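/*
 * min_check: sysctl helper; reject a new minimum percentage if the three
 * minimums (anon, file, exec) would together exceed 95% of memory.
 */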
static int
min_check(struct uvm_pctparam *pct, int t)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;
	int total = t;

	if (pct != &s->s_anonmin) {
		total += uvm_pctparam_get(&s->s_anonmin);
	}
	if (pct != &s->s_filemin) {
		total += uvm_pctparam_get(&s->s_filemin);
	}
	if (pct != &s->s_execmin) {
		total += uvm_pctparam_get(&s->s_execmin);
	}
	if (total > 95) {
		return EINVAL;
	}
	return 0;
}
#endif /* !defined(PDSIM) */

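/*
 * uvmpdpol_init: set up the clock policy: the global lock, the two page
 * queues, and the default percentage parameters.
 */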
void
uvmpdpol_init(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	mutex_init(&s->lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&s->s_activeq);
	TAILQ_INIT(&s->s_inactiveq);
	uvm_pctparam_init(&s->s_inactivepct, CLOCK_INACTIVEPCT, NULL);
	uvm_pctparam_init(&s->s_anonmin, 10, min_check);
	uvm_pctparam_init(&s->s_filemin, 10, min_check);
	uvm_pctparam_init(&s->s_execmin,  5, min_check);
	uvm_pctparam_init(&s->s_anonmax, 80, NULL);
	uvm_pctparam_init(&s->s_filemax, 50, NULL);
	uvm_pctparam_init(&s->s_execmax, 30, NULL);
}

void
uvmpdpol_reinit(void)
{
}

bool
uvmpdpol_needsscan_p(void)
{

	/* This must be an unlocked check: can be called from interrupt. */
	return pdpol_state.s_inactive < pdpol_state.s_inactarg;
}

void
uvmpdpol_tune(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	mutex_enter(&s->lock);
	clock_tune();
	mutex_exit(&s->lock);
}

#if !defined(PDSIM)

#include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */

void
uvmpdpol_sysctlsetup(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	uvm_pctparam_createsysctlnode(&s->s_anonmin, "anonmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemin, "filemin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmin, "execmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached executable data"));

	uvm_pctparam_createsysctlnode(&s->s_anonmax, "anonmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for "
	    "anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemax, "filemax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmax, "execmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "executable data"));

	uvm_pctparam_createsysctlnode(&s->s_inactivepct, "inactivepct",
	    SYSCTL_DESCR("Size of the inactive queue as a percentage of "
	    "the entire (active + inactive) queue"));
}

#endif /* !defined(PDSIM) */

#if defined(PDSIM)
void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	/* XXX */
#endif /* defined(DEBUG) */
}
#endif /* defined(PDSIM) */