/*	$NetBSD: uvm_pdpolicy_clock.c,v 1.12.16.6 2012/04/12 19:39:55 matt Exp $	*/
/*	NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#if defined(PDSIM)

#include "pdsim.h"

#else /* defined(PDSIM) */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.12.16.6 2012/04/12 19:39:55 matt Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pdpolicy_impl.h>

#endif /* defined(PDSIM) */

#define PQ_INACTIVE	PQ_PRIVATE1	/* page is in inactive list */
#define PQ_ACTIVE	PQ_PRIVATE2	/* page is in active list */
#define	PQ_RADIOACTIVE	PQ_PRIVATE3	/* page is in radioactive list */

#if !defined(CLOCK_INACTIVEPCT)
#define	CLOCK_INACTIVEPCT	33
#endif /* !defined(CLOCK_INACTIVEPCT) */

struct uvmpdpol_scanstate {
	struct vm_page *ss_nextpg;
	bool ss_first;
	bool ss_anonreact, ss_filereact, ss_execreact;
};

struct uvmpdpol_groupstate {
	struct pglist gs_activeq;	/* allocated pages, in use */
	struct pglist gs_inactiveq;	/* pages between the clock hands */
	struct pglist gs_radioactiveq;	/* swap-backed pages, in use, parked
					   while no swap is configured */
	u_int gs_active;
	u_int gs_radioactive;
	u_int gs_inactive;
	u_int gs_inactarg;
	struct uvmpdpol_scanstate gs_scanstate;
};

struct uvmpdpol_globalstate {
	struct uvmpdpol_groupstate *s_pggroups;
	struct uvm_pctparam s_anonmin;
	struct uvm_pctparam s_filemin;
	struct uvm_pctparam s_execmin;
	struct uvm_pctparam s_anonmax;
	struct uvm_pctparam s_filemax;
	struct uvm_pctparam s_execmax;
	struct uvm_pctparam s_inactivepct;
};


static struct uvmpdpol_globalstate pdpol_state;

PDPOL_EVCNT_DEFINE(reactexec)
PDPOL_EVCNT_DEFINE(reactfile)
PDPOL_EVCNT_DEFINE(reactanon)

#ifdef DEBUG
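/*
 * clock_pglist_count: count the pages on a page queue by walking it.
 * Used only by the KDASSERT consistency checks in this file.
 */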
static size_t
clock_pglist_count(struct pglist *pglist)
{
	size_t count = 0;
	struct vm_page *pg;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
		count++;
	}
	return count;
}
#endif

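/*
 * clock_space: report the size of the per-group state so the caller
 * can allocate one struct uvmpdpol_groupstate per page group.
 */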
static size_t
clock_space(void)
{
	return sizeof(struct uvmpdpol_groupstate);
}

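/*
 * clock_tune: recompute the group's inactive target as a percentage
 * (CLOCK_INACTIVEPCT by default) of its active + inactive pages,
 * keeping the target above the group's free target.
 */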
static void
clock_tune(struct uvm_pggroup *grp)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	gs->gs_inactarg = UVM_PCTPARAM_APPLY(&s->s_inactivepct,
	    gs->gs_active + gs->gs_inactive);
	if (gs->gs_inactarg <= grp->pgrp_freetarg) {
		gs->gs_inactarg = grp->pgrp_freetarg + 1;
	}
}

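/*
 * uvmpdpol_scaninit: prepare the group's scan state before a pass
 * over its inactive queue.
 */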
void
uvmpdpol_scaninit(struct uvm_pggroup *grp)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
	struct uvmpdpol_scanstate * const ss = &gs->gs_scanstate;
	bool anonunder, fileunder, execunder;
	bool anonover, fileover, execover;
	bool anonreact, filereact, execreact;

	/*
	 * decide which types of pages we want to reactivate instead of freeing
	 * to keep usage within the minimum and maximum usage limits.
	 */

	u_int t = gs->gs_active + gs->gs_inactive + grp->pgrp_free;
	anonunder = grp->pgrp_anonpages <= UVM_PCTPARAM_APPLY(&s->s_anonmin, t);
	fileunder = grp->pgrp_filepages <= UVM_PCTPARAM_APPLY(&s->s_filemin, t);
	execunder = grp->pgrp_execpages <= UVM_PCTPARAM_APPLY(&s->s_execmin, t);
	anonover = grp->pgrp_anonpages > UVM_PCTPARAM_APPLY(&s->s_anonmax, t);
	fileover = grp->pgrp_filepages > UVM_PCTPARAM_APPLY(&s->s_filemax, t);
	execover = grp->pgrp_execpages > UVM_PCTPARAM_APPLY(&s->s_execmax, t);
	anonreact = anonunder || (!anonover && (fileover || execover));
	filereact = fileunder || (!fileover && (anonover || execover));
	execreact = execunder || (!execover && (anonover || fileover));
	if (filereact && execreact && (anonreact || uvm_swapisfull())) {
		anonreact = filereact = execreact = false;
	}
	ss->ss_anonreact = anonreact;
	ss->ss_filereact = filereact;
	ss->ss_execreact = execreact;

	ss->ss_first = true;
}

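/*
 * uvmpdpol_selectvictim: return the next inactive page eligible for
 * reclaim.  Referenced pages, and pages whose type (exec/file/anon)
 * the scan state says to reactivate, are moved back to the active
 * queue and skipped.
 */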
struct vm_page *
uvmpdpol_selectvictim(struct uvm_pggroup *grp)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
	struct uvmpdpol_scanstate * const ss = &gs->gs_scanstate;
	struct vm_page *pg;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);

	KASSERT(mutex_owned(&uvm_pageqlock));

	while (/* CONSTCOND */ 1) {
		struct vm_anon *anon;
		struct uvm_object *uobj;

		//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

		if (ss->ss_first) {
			pg = TAILQ_FIRST(&gs->gs_inactiveq);
			ss->ss_first = false;
			UVMHIST_LOG(pdhist, "  select first inactive page: %p",
			    pg, 0, 0, 0);
		} else {
			pg = ss->ss_nextpg;
			if (pg != NULL && (pg->pqflags & PQ_INACTIVE) == 0) {
				pg = TAILQ_FIRST(&gs->gs_inactiveq);
			}
			UVMHIST_LOG(pdhist, "  select next inactive page: %p",
			    pg, 0, 0, 0);
		}
		if (pg == NULL) {
			break;
		}
		ss->ss_nextpg = TAILQ_NEXT(pg, pageq.queue);

		grp->pgrp_pdscans++;

		/*
		 * move referenced pages back to active queue and
		 * skip to next page.
		 */

		if (pmap_is_referenced(pg)) {
			uvmpdpol_pageactivate(pg);
			grp->pgrp_pdreact++;
			continue;
		}

		anon = pg->uanon;
		uobj = pg->uobject;

		/*
		 * enforce the minimum thresholds on different
		 * types of memory usage.  if reusing the current
		 * page would reduce that type of usage below its
		 * minimum, reactivate the page instead and move
		 * on to the next page.
		 */

		if (uobj && UVM_OBJ_IS_VTEXT(uobj) && ss->ss_execreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactexec);
			continue;
		}
		if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
		    !UVM_OBJ_IS_VTEXT(uobj) && ss->ss_filereact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactfile);
			continue;
		}
		if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && ss->ss_anonreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactanon);
			continue;
		}

		break;
	}

	return pg;
}

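/*
 * uvmpdpol_balancequeue: deactivate active pages until the group's
 * inactive target is met and, given a swap shortage, try to free swap
 * slots from swap-backed active pages along the way.
 */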
void
uvmpdpol_balancequeue(struct uvm_pggroup *grp, u_int swap_shortage)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	struct vm_page *pg, *nextpg;

	/*
	 * we have done the scan to get free pages.   now we work on meeting
	 * our inactive target.
	 */

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	/*
	 * If swap was added, move all the pages from radioactive queue to the
	 * active queue.
	 */
#ifdef VMSWAP
	if (uvmexp.nswapdev > 0) {
		while ((pg = TAILQ_FIRST(&gs->gs_radioactiveq)) != NULL) {
			uvmpdpol_pageactivate(pg);
		}
	}
#endif

	/* signed: the queue may already be past the target */
	int inactive_shortage = gs->gs_inactarg - gs->gs_inactive;
	for (pg = TAILQ_FIRST(&gs->gs_activeq);
	     pg != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     pg = nextpg) {
		nextpg = TAILQ_NEXT(pg, pageq.queue);

		/*
		 * if there's a shortage of swap slots, try to free it.
		 */

#ifdef VMSWAP
		if (swap_shortage > 0 && (pg->pqflags & PQ_SWAPBACKED) != 0) {
			if (uvmpd_trydropswap(pg)) {
				swap_shortage--;
			}
		}
#endif

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */
		if (inactive_shortage > 0) {
			/* no need to check wire_count as pg is "active" */
			uvmpdpol_pagedeactivate(pg);
			grp->pgrp_pddeact++;
			inactive_shortage--;
		}
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
}

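/*
 * uvmpdpol_pagedeactivate: remove a page from the active (or
 * radioactive) queue, clear its pmap reference bit, and put it on the
 * tail of the inactive queue.
 */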
void
uvmpdpol_pagedeactivate(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

#if 0
	/*
	 * If there is no swap available and the page is anonymous without
	 * a backing store, don't bother marking it INACTIVE since it would
	 * only be a "dirty reactivation".
	 */
	if (uvmexp.nswapdev < 1 && (pg->pqflags & PQ_SWAPBACKED) != 0) {
		KASSERT(pg->pqflags & PQ_RADIOACTIVE);
		return;
	}
#else
	KASSERT((pg->pqflags & PQ_RADIOACTIVE) == 0);
#endif

	KASSERT(!(pg->pqflags & PQ_FREE));
	KASSERT(mutex_owned(&uvm_pageqlock));

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&gs->gs_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(gs->gs_active > 0);
		gs->gs_active--;
		grp->pgrp_active--;
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
	//KDASSERT(gs->gs_radioactive == clock_pglist_count(&gs->gs_radioactiveq));

	if (pg->pqflags & PQ_RADIOACTIVE) {
		TAILQ_REMOVE(&gs->gs_radioactiveq, pg, pageq.queue);
		pg->pqflags &= ~PQ_RADIOACTIVE;
		KASSERT(gs->gs_radioactive > 0);
		gs->gs_radioactive--;
		grp->pgrp_active--;
	}

	//KDASSERT(gs->gs_radioactive == clock_pglist_count(&gs->gs_radioactiveq));
	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

	if ((pg->pqflags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		pmap_clear_reference(pg);
		TAILQ_INSERT_TAIL(&gs->gs_inactiveq, pg, pageq.queue);
		pg->pqflags |= PQ_INACTIVE;
		gs->gs_inactive++;
		grp->pgrp_inactive++;
	}

	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));
}

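/*
 * uvmpdpol_pageactivate: move a page to the tail of the active queue.
 * A swap-backed page with no swap device configured goes on the
 * radioactive queue instead, since deactivating it could not lead to
 * a pageout anyway.
 */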
void
uvmpdpol_pageactivate(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	KASSERT(!(pg->pqflags & PQ_FREE));
	KASSERT(mutex_owned(&uvm_pageqlock));

	uvmpdpol_pagedequeue(pg);
	if (uvmexp.nswapdev < 1 && (pg->pqflags & PQ_SWAPBACKED) != 0) {
		TAILQ_INSERT_TAIL(&gs->gs_radioactiveq, pg, pageq.queue);
		pg->pqflags |= PQ_RADIOACTIVE;
		gs->gs_radioactive++;
		/*
		 * radioactive pages count as active; the dequeue and
		 * deactivate paths decrement pgrp_active for them.
		 */
		grp->pgrp_active++;
	} else {
		TAILQ_INSERT_TAIL(&gs->gs_activeq, pg, pageq.queue);
		pg->pqflags |= PQ_ACTIVE;
		gs->gs_active++;
		grp->pgrp_active++;
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
}

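/*
 * uvmpdpol_pagedequeue: remove a page from whichever paging queue it
 * is on and fix up the queue counters.
 */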
void
uvmpdpol_pagedequeue(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	KASSERT(!(pg->pqflags & PQ_FREE));
	KASSERT(mutex_owned(&uvm_pageqlock));
	//KDASSERT(gs->gs_radioactive == clock_pglist_count(&gs->gs_radioactiveq));

	if (pg->pqflags & PQ_RADIOACTIVE) {
		TAILQ_REMOVE(&gs->gs_radioactiveq, pg, pageq.queue);
		pg->pqflags &= ~PQ_RADIOACTIVE;
		KASSERT(gs->gs_radioactive > 0);
		gs->gs_radioactive--;
		grp->pgrp_active--;
	}

	//KDASSERT(gs->gs_radioactive == clock_pglist_count(&gs->gs_radioactiveq));
	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&gs->gs_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(gs->gs_active > 0);
		gs->gs_active--;
		grp->pgrp_active--;
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

	if (pg->pqflags & PQ_INACTIVE) {
		TAILQ_REMOVE(&gs->gs_inactiveq, pg, pageq.queue);
		pg->pqflags &= ~PQ_INACTIVE;
		KASSERT(gs->gs_inactive > 0);
		gs->gs_inactive--;
		grp->pgrp_inactive--;
	}

	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));
}

void
uvmpdpol_pageenqueue(struct vm_page *pg)
{

	uvmpdpol_pageactivate(pg);
}

void
uvmpdpol_anfree(struct vm_anon *an)
{
}

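/*
 * uvmpdpol_pageisqueued_p: report whether the page is on any of the
 * paging queues.
 */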
bool
uvmpdpol_pageisqueued_p(struct vm_page *pg)
{

	return (pg->pqflags & (PQ_RADIOACTIVE | PQ_ACTIVE | PQ_INACTIVE)) != 0;
}

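/*
 * uvmpdpol_estimatepageable: report the pageable page counts, either
 * for a single page group or summed over all groups when grp is NULL.
 * Radioactive pages count as active.
 */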
void
uvmpdpol_estimatepageable(const struct uvm_pggroup *grp,
	u_int *activep, u_int *inactivep)
{
	if (grp != NULL) {
		struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
		if (activep) {
			*activep = gs->gs_active + gs->gs_radioactive;
		}
		if (inactivep) {
			*inactivep = gs->gs_inactive;
		}
		return;
	}

	u_int active = 0, inactive = 0;
	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

		//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
		//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

		active += gs->gs_active + gs->gs_radioactive;
		inactive += gs->gs_inactive;
	}

	if (activep) {
		*activep = active;
	}
	if (inactivep) {
		*inactivep = inactive;
	}
}

#if !defined(PDSIM)
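/*
 * min_check: sysctl bounds check for the anonmin/filemin/execmin
 * parameters; reject a setting that would push the three minima past
 * 95% combined.
 */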
static int
min_check(struct uvm_pctparam *pct, int t)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	u_int total = t;

	if (pct != &s->s_anonmin) {
		total += uvm_pctparam_get(&s->s_anonmin);
	}
	if (pct != &s->s_filemin) {
		total += uvm_pctparam_get(&s->s_filemin);
	}
	if (pct != &s->s_execmin) {
		total += uvm_pctparam_get(&s->s_execmin);
	}
	if (total > 95) {
		return EINVAL;
	}
	return 0;
}
#endif /* !defined(PDSIM) */

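/*
 * uvmpdpol_init: wire up one group state per page group, set the
 * default percentage parameters, and prime the per-group scan state.
 */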
void
uvmpdpol_init(void *new_gs, size_t npggroups)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate *gs = new_gs;

	s->s_pggroups = gs;

	struct uvm_pggroup *grp = uvm.pggroups;
	for (size_t pggroup = 0; pggroup < npggroups; pggroup++, gs++, grp++) {
		TAILQ_INIT(&gs->gs_activeq);
		TAILQ_INIT(&gs->gs_inactiveq);
		TAILQ_INIT(&gs->gs_radioactiveq);
		grp->pgrp_gs = gs;
		KASSERT(gs->gs_active == 0);
		KASSERT(gs->gs_inactive == 0);
		KASSERT(gs->gs_radioactive == 0);
		KASSERT(grp->pgrp_active == 0);
		KASSERT(grp->pgrp_inactive == 0);
	}
	uvm_pctparam_init(&s->s_inactivepct, CLOCK_INACTIVEPCT, NULL);
	uvm_pctparam_init(&s->s_anonmin, 10, min_check);
	uvm_pctparam_init(&s->s_filemin, 10, min_check);
	uvm_pctparam_init(&s->s_execmin,  5, min_check);
	uvm_pctparam_init(&s->s_anonmax, 80, NULL);
	uvm_pctparam_init(&s->s_filemax, 50, NULL);
	uvm_pctparam_init(&s->s_execmax, 30, NULL);

	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		uvmpdpol_scaninit(grp);
	}
}

void
uvmpdpol_reinit(void)
{
}

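/*
 * uvmpdpol_needsscan_p: true if the group has fewer inactive pages
 * than its target, i.e. the pagedaemon should scan it.
 */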
bool
uvmpdpol_needsscan_p(struct uvm_pggroup *grp)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	return gs->gs_inactive < gs->gs_inactarg;
}

void
uvmpdpol_tune(struct uvm_pggroup *grp)
{

	clock_tune(grp);
}

size_t
uvmpdpol_space(void)
{

	return clock_space();
}

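/*
 * uvmpdpol_recolor: after a change in the number of page colors,
 * rebuild the per-group queues by moving every queued page from the
 * old group state into the group it now hashes to.
 */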
void
uvmpdpol_recolor(void *new_gs, struct uvm_pggroup *grparray,
	size_t npggroups, size_t old_ncolors)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate * src_gs = s->s_pggroups;
	struct uvmpdpol_groupstate * const gs = new_gs;

	s->s_pggroups = gs;

	for (size_t i = 0; i < npggroups; i++) {
		struct uvmpdpol_groupstate * const dst_gs = &gs[i];
		TAILQ_INIT(&dst_gs->gs_activeq);
		TAILQ_INIT(&dst_gs->gs_inactiveq);
		TAILQ_INIT(&dst_gs->gs_radioactiveq);
		uvm.pggroups[i].pgrp_gs = dst_gs;
	}

	const size_t old_npggroups = VM_NPGGROUP(old_ncolors);
	for (size_t i = 0; i < old_npggroups; i++, src_gs++) {
		struct vm_page *pg;
		KDASSERT(src_gs->gs_inactive == clock_pglist_count(&src_gs->gs_inactiveq));
		while ((pg = TAILQ_FIRST(&src_gs->gs_inactiveq)) != NULL) {
			u_int pggroup = VM_PAGE_TO_PGGROUP(pg, uvmexp.ncolors);
			struct uvmpdpol_groupstate * const xgs = &gs[pggroup];

			/* unlink from the old queue before reinserting */
			TAILQ_REMOVE(&src_gs->gs_inactiveq, pg, pageq.queue);
			TAILQ_INSERT_TAIL(&xgs->gs_inactiveq, pg, pageq.queue);
			src_gs->gs_inactive--;
			xgs->gs_inactive++;
			uvm.pggroups[pggroup].pgrp_inactive++;
			KDASSERT(xgs->gs_inactive == clock_pglist_count(&xgs->gs_inactiveq));
		}
		KASSERT(src_gs->gs_inactive == 0);

		KDASSERT(src_gs->gs_active == clock_pglist_count(&src_gs->gs_activeq));
		while ((pg = TAILQ_FIRST(&src_gs->gs_activeq)) != NULL) {
			u_int pggroup = VM_PAGE_TO_PGGROUP(pg, uvmexp.ncolors);
			struct uvmpdpol_groupstate * const xgs = &gs[pggroup];

			TAILQ_REMOVE(&src_gs->gs_activeq, pg, pageq.queue);
			TAILQ_INSERT_TAIL(&xgs->gs_activeq, pg, pageq.queue);
			src_gs->gs_active--;
			xgs->gs_active++;
			KDASSERT(xgs->gs_active == clock_pglist_count(&xgs->gs_activeq));
			uvm.pggroups[pggroup].pgrp_active++;
		}
		KASSERT(src_gs->gs_active == 0);

		KDASSERT(src_gs->gs_radioactive == clock_pglist_count(&src_gs->gs_radioactiveq));
		while ((pg = TAILQ_FIRST(&src_gs->gs_radioactiveq)) != NULL) {
			u_int pggroup = VM_PAGE_TO_PGGROUP(pg, uvmexp.ncolors);
			struct uvmpdpol_groupstate * const xgs = &gs[pggroup];

			TAILQ_REMOVE(&src_gs->gs_radioactiveq, pg, pageq.queue);
			TAILQ_INSERT_TAIL(&xgs->gs_radioactiveq, pg, pageq.queue);
			src_gs->gs_radioactive--;
			xgs->gs_radioactive++;
			KDASSERT(xgs->gs_radioactive == clock_pglist_count(&xgs->gs_radioactiveq));
			uvm.pggroups[pggroup].pgrp_active++;
		}
		KASSERT(src_gs->gs_radioactive == 0);
	}

	struct uvm_pggroup *grp;
	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		clock_tune(grp);
		uvmpdpol_scaninit(grp);
	}
}

#if !defined(PDSIM)

#include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */

void
uvmpdpol_sysctlsetup(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	uvm_pctparam_createsysctlnode(&s->s_anonmin, "anonmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemin, "filemin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmin, "execmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached executable data"));

	uvm_pctparam_createsysctlnode(&s->s_anonmax, "anonmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for "
	    "anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemax, "filemax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmax, "execmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "executable data"));

	uvm_pctparam_createsysctlnode(&s->s_inactivepct, "inactivepct",
	    SYSCTL_DESCR("Percentage of inactive queue of "
	    "the entire (active + inactive) queue"));
}

#endif /* !defined(PDSIM) */

#if defined(PDSIM)
void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	/* XXX */
#endif /* defined(DEBUG) */
}
#endif /* defined(PDSIM) */