/*	$NetBSD: uvm_pdpolicy_clock.c,v 1.12.16.2 2012/02/09 03:05:01 matt Exp $	*/
/*	NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c	8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#if defined(PDSIM)

#include "pdsim.h"

#else /* defined(PDSIM) */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.12.16.2 2012/02/09 03:05:01 matt Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pdpolicy_impl.h>

#endif /* defined(PDSIM) */

#define	PQ_INACTIVE	PQ_PRIVATE1	/* page is in inactive list */
#define	PQ_ACTIVE	PQ_PRIVATE2	/* page is in active list */

#if !defined(CLOCK_INACTIVEPCT)
#define	CLOCK_INACTIVEPCT	33
#endif /* !defined(CLOCK_INACTIVEPCT) */

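/*
 * per-group state carried across calls to uvmpdpol_selectvictim() during a
 * single scan: the next inactive page to examine and which page types
 * should be reactivated rather than reclaimed.
 */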
struct uvmpdpol_scanstate {
	struct vm_page *ss_nextpg;
	bool ss_first;
	bool ss_anonreact, ss_filereact, ss_execreact;
};

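/*
 * per page-group clock state: the active and inactive queues, their
 * lengths, and the inactive target computed by clock_tune().
 */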
struct uvmpdpol_groupstate {
	struct pglist gs_activeq;	/* allocated pages, in use */
	struct pglist gs_inactiveq;	/* pages between the clock hands */
	u_int gs_active;
	u_int gs_inactive;
	u_int gs_inactarg;
	struct uvmpdpol_scanstate gs_scanstate;
};

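/*
 * global clock state: the array of per-group states and the percentage
 * tunables exposed via uvmpdpol_sysctlsetup().
 */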
struct uvmpdpol_globalstate {
	struct uvmpdpol_groupstate *s_pggroups;
	struct uvm_pctparam s_anonmin;
	struct uvm_pctparam s_filemin;
	struct uvm_pctparam s_execmin;
	struct uvm_pctparam s_anonmax;
	struct uvm_pctparam s_filemax;
	struct uvm_pctparam s_execmax;
	struct uvm_pctparam s_inactivepct;
};


static struct uvmpdpol_globalstate pdpol_state;

PDPOL_EVCNT_DEFINE(reactexec)
PDPOL_EVCNT_DEFINE(reactfile)
PDPOL_EVCNT_DEFINE(reactanon)

#ifdef DEBUG
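/*
 * clock_pglist_count: count the pages on a queue; used only by the
 * KDASSERT consistency checks below.
 */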
static size_t
clock_pglist_count(struct pglist *pglist)
{
	size_t count = 0;
	struct vm_page *pg;

	TAILQ_FOREACH(pg, pglist, pageq.queue) {
		count++;
	}
	return count;
}
#endif

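/*
 * clock_space: size of the per-group state this policy needs.
 */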
static size_t
clock_space(void)
{
	return sizeof(struct uvmpdpol_groupstate);
}

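/*
 * clock_tune: recompute the group's inactive target as a percentage
 * (s_inactivepct) of its active + inactive pages, but never below the
 * group's free target.
 */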
static void
clock_tune(struct uvm_pggroup *grp)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	gs->gs_inactarg = UVM_PCTPARAM_APPLY(&s->s_inactivepct,
	    gs->gs_active + gs->gs_inactive);
	if (gs->gs_inactarg <= grp->pgrp_freetarg) {
		gs->gs_inactarg = grp->pgrp_freetarg + 1;
	}
}

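/*
 * uvmpdpol_scaninit: prepare for a scan of the group's inactive queue.
 * decide, from the anon/file/exec min and max tunables, which page types
 * should be reactivated instead of reclaimed, and restart the scan at the
 * head of the inactive queue.
 */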
void
uvmpdpol_scaninit(struct uvm_pggroup *grp)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
	struct uvmpdpol_scanstate * const ss = &gs->gs_scanstate;
	bool anonunder, fileunder, execunder;
	bool anonover, fileover, execover;
	bool anonreact, filereact, execreact;

	/*
	 * decide which types of pages we want to reactivate instead of freeing
	 * to keep usage within the minimum and maximum usage limits.
	 */

	u_int t = gs->gs_active + gs->gs_inactive + grp->pgrp_free;
	anonunder = grp->pgrp_anonpages <= UVM_PCTPARAM_APPLY(&s->s_anonmin, t);
	fileunder = grp->pgrp_filepages <= UVM_PCTPARAM_APPLY(&s->s_filemin, t);
	execunder = grp->pgrp_execpages <= UVM_PCTPARAM_APPLY(&s->s_execmin, t);
	anonover = grp->pgrp_anonpages > UVM_PCTPARAM_APPLY(&s->s_anonmax, t);
	fileover = grp->pgrp_filepages > UVM_PCTPARAM_APPLY(&s->s_filemax, t);
	execover = grp->pgrp_execpages > UVM_PCTPARAM_APPLY(&s->s_execmax, t);
	anonreact = anonunder || (!anonover && (fileover || execover));
	filereact = fileunder || (!fileover && (anonover || execover));
	execreact = execunder || (!execover && (anonover || fileover));
	if (filereact && execreact && (anonreact || uvm_swapisfull())) {
		anonreact = filereact = execreact = false;
	}
	ss->ss_anonreact = anonreact;
	ss->ss_filereact = filereact;
	ss->ss_execreact = execreact;

	ss->ss_first = true;
}

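/*
 * uvmpdpol_selectvictim: return the next inactive page that may be
 * reclaimed, or NULL when the inactive queue is exhausted.  referenced
 * pages and pages whose type is currently being protected by the scan
 * state are reactivated and skipped.  called with uvm_pageqlock held.
 */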
struct vm_page *
uvmpdpol_selectvictim(struct uvm_pggroup *grp)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
	struct uvmpdpol_scanstate * const ss = &gs->gs_scanstate;
	struct vm_page *pg;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);

	KASSERT(mutex_owned(&uvm_pageqlock));

	while (/* CONSTCOND */ 1) {
		struct vm_anon *anon;
		struct uvm_object *uobj;

		KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

		if (ss->ss_first) {
			pg = TAILQ_FIRST(&gs->gs_inactiveq);
			ss->ss_first = false;
			UVMHIST_LOG(pdhist, "  select first inactive page: %p",
			    pg, 0, 0, 0);
		} else {
			pg = ss->ss_nextpg;
			if (pg != NULL && (pg->pqflags & PQ_INACTIVE) == 0) {
				pg = TAILQ_FIRST(&gs->gs_inactiveq);
			}
			UVMHIST_LOG(pdhist, "  select next inactive page: %p",
			    pg, 0, 0, 0);
		}
		if (pg == NULL) {
			break;
		}
		ss->ss_nextpg = TAILQ_NEXT(pg, pageq.queue);

		grp->pgrp_pdscans++;

		/*
		 * move referenced pages back to active queue and
		 * skip to next page.
		 */

		if (pmap_is_referenced(pg)) {
			uvmpdpol_pageactivate(pg);
			grp->pgrp_pdreact++;
			continue;
		}

		anon = pg->uanon;
		uobj = pg->uobject;

		/*
		 * enforce the minimum thresholds on different
		 * types of memory usage.  if reusing the current
		 * page would reduce that type of usage below its
		 * minimum, reactivate the page instead and move
		 * on to the next page.
		 */

		if (uobj && UVM_OBJ_IS_VTEXT(uobj) && ss->ss_execreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactexec);
			continue;
		}
		if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
		    !UVM_OBJ_IS_VTEXT(uobj) && ss->ss_filereact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactfile);
			continue;
		}
		if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && ss->ss_anonreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactanon);
			continue;
		}

		break;
	}

	return pg;
}

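/*
 * uvmpdpol_balancequeue: called after the free-page scan.  deactivate
 * pages from the active queue until the inactive target is met, and, if
 * swap is short, try to free swap slots from swap-backed active pages.
 */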
void
uvmpdpol_balancequeue(struct uvm_pggroup *grp, u_int swap_shortage)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	struct vm_page *pg, *nextpg;

	/*
	 * we have done the scan to get free pages.   now we work on meeting
	 * our inactive target.
	 */

	KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	/* avoid unsigned wrap-around when we are already above the target */
	u_int inactive_shortage = gs->gs_inactive < gs->gs_inactarg ?
	    gs->gs_inactarg - gs->gs_inactive : 0;
	for (pg = TAILQ_FIRST(&gs->gs_activeq);
	     pg != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     pg = nextpg) {
		nextpg = TAILQ_NEXT(pg, pageq.queue);

		/*
		 * if there's a shortage of swap slots, try to free it.
		 */

#ifdef VMSWAP
		if (swap_shortage > 0 && (pg->pqflags & PQ_SWAPBACKED) != 0) {
			if (uvmpd_trydropswap(pg)) {
				swap_shortage--;
			}
		}
#endif

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */

		if (inactive_shortage > 0) {
			/* no need to check wire_count as pg is "active" */
			uvmpdpol_pagedeactivate(pg);
			grp->pgrp_pddeact++;
			inactive_shortage--;
		}
	}

	KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
}

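/*
 * uvmpdpol_pagedeactivate: move a page to the tail of the group's inactive
 * queue, clearing its reference bit.  called with uvm_pageqlock held.
 */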
void
uvmpdpol_pagedeactivate(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	KASSERT(mutex_owned(&uvm_pageqlock));

	KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&gs->gs_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(gs->gs_active > 0);
		gs->gs_active--;
		grp->pgrp_active--;
	}

	KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
	KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

	if ((pg->pqflags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		pmap_clear_reference(pg);
		TAILQ_INSERT_TAIL(&gs->gs_inactiveq, pg, pageq.queue);
		pg->pqflags |= PQ_INACTIVE;
		gs->gs_inactive++;
		grp->pgrp_inactive++;
	}

	KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));
}

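/*
 * uvmpdpol_pageactivate: move a page to the tail of the group's active
 * queue, dequeueing it first if necessary.
 */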
void
uvmpdpol_pageactivate(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	uvmpdpol_pagedequeue(pg);
	TAILQ_INSERT_TAIL(&gs->gs_activeq, pg, pageq.queue);
	pg->pqflags |= PQ_ACTIVE;
	gs->gs_active++;
	grp->pgrp_active++;

	KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
}

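/*
 * uvmpdpol_pagedequeue: remove a page from whichever paging queue it is on
 * and update the group's counters.
 */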
void
uvmpdpol_pagedequeue(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	if (pg->pqflags & PQ_ACTIVE) {
		KASSERT(mutex_owned(&uvm_pageqlock));
		TAILQ_REMOVE(&gs->gs_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(gs->gs_active > 0);
		gs->gs_active--;
		grp->pgrp_active--;
	}

	KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
	KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

	if (pg->pqflags & PQ_INACTIVE) {
		KASSERT(mutex_owned(&uvm_pageqlock));
		TAILQ_REMOVE(&gs->gs_inactiveq, pg, pageq.queue);
		pg->pqflags &= ~PQ_INACTIVE;
		KASSERT(gs->gs_inactive > 0);
		gs->gs_inactive--;
		grp->pgrp_inactive--;
	}

	KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));
}

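/*
 * uvmpdpol_pageenqueue: a page is entering the paging queues for the first
 * time; the clock policy simply treats it as active.
 */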
void
uvmpdpol_pageenqueue(struct vm_page *pg)
{

	uvmpdpol_pageactivate(pg);
}

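/*
 * uvmpdpol_anfree: an anon is being freed; this policy keeps no per-anon
 * state, so there is nothing to do.
 */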
void
uvmpdpol_anfree(struct vm_anon *an)
{
}

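/*
 * uvmpdpol_pageisqueued_p: true if the page is on the active or inactive
 * queue.
 */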
bool
uvmpdpol_pageisqueued_p(struct vm_page *pg)
{

	return (pg->pqflags & (PQ_ACTIVE | PQ_INACTIVE)) != 0;
}

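/*
 * uvmpdpol_estimatepageable: report the total number of active and
 * inactive pages across all page groups.
 */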
void
uvmpdpol_estimatepageable(u_int *activep, u_int *inactivep)
{
	struct uvm_pggroup *grp;
	u_int active = 0, inactive = 0;

	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

		KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
		KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

		active += gs->gs_active;
		inactive += gs->gs_inactive;
	}

	if (activep) {
		*activep = active;
	}
	if (inactivep) {
		*inactivep = inactive;
	}
}

#if !defined(PDSIM)
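/*
 * min_check: sysctl sanity check for the anonmin/filemin/execmin
 * tunables; reject values whose combined total would exceed 95%.
 */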
static int
min_check(struct uvm_pctparam *pct, int t)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	u_int total = t;

	if (pct != &s->s_anonmin) {
		total += uvm_pctparam_get(&s->s_anonmin);
	}
	if (pct != &s->s_filemin) {
		total += uvm_pctparam_get(&s->s_filemin);
	}
	if (pct != &s->s_execmin) {
		total += uvm_pctparam_get(&s->s_execmin);
	}
	if (total > 95) {
		return EINVAL;
	}
	return 0;
}
#endif /* !defined(PDSIM) */

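/*
 * uvmpdpol_init: attach the per-group state supplied by the caller to each
 * page group, initialize the queues and the tunables, and prime the scan
 * state of every group.
 */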
void
uvmpdpol_init(void *new_gs, size_t npggroups)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate *gs = new_gs;

	s->s_pggroups = gs;

	struct uvm_pggroup *grp = uvm.pggroups;
	for (size_t pggroup = 0; pggroup < npggroups; pggroup++, gs++, grp++) {
		TAILQ_INIT(&gs->gs_activeq);
		TAILQ_INIT(&gs->gs_inactiveq);
		grp->pgrp_gs = gs;
		KASSERT(gs->gs_active == 0);
		KASSERT(gs->gs_inactive == 0);
		KASSERT(grp->pgrp_active == 0);
		KASSERT(grp->pgrp_inactive == 0);
	}
	uvm_pctparam_init(&s->s_inactivepct, CLOCK_INACTIVEPCT, NULL);
	uvm_pctparam_init(&s->s_anonmin, 10, min_check);
	uvm_pctparam_init(&s->s_filemin, 10, min_check);
	uvm_pctparam_init(&s->s_execmin, 5, min_check);
	uvm_pctparam_init(&s->s_anonmax, 80, NULL);
	uvm_pctparam_init(&s->s_filemax, 50, NULL);
	uvm_pctparam_init(&s->s_execmax, 30, NULL);

	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		uvmpdpol_scaninit(grp);
	}
}

void
uvmpdpol_reinit(void)
{
}

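/*
 * uvmpdpol_needsscan_p: true if the group's inactive queue is below its
 * target and the pagedaemon should scan it.
 */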
bool
uvmpdpol_needsscan_p(struct uvm_pggroup *grp)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	return gs->gs_inactive < gs->gs_inactarg;
}

void
uvmpdpol_tune(struct uvm_pggroup *grp)
{

	clock_tune(grp);
}

size_t
uvmpdpol_space(void)
{

	return clock_space();
}

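/*
 * uvmpdpol_recolor: the page groups have been rebuilt (the number of page
 * colors changed).  move every page from the old per-group queues onto the
 * queue of the group it now belongs to, then retune and restart the scan
 * state of each group.
 */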
void
uvmpdpol_recolor(void *new_gs, struct uvm_pggroup *grparray,
    size_t npggroups, size_t old_ncolors)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate *src_gs = s->s_pggroups;
	struct uvmpdpol_groupstate * const gs = new_gs;

	s->s_pggroups = gs;

	for (size_t i = 0; i < npggroups; i++) {
		struct uvmpdpol_groupstate * const dst_gs = &gs[i];
		TAILQ_INIT(&dst_gs->gs_activeq);
		TAILQ_INIT(&dst_gs->gs_inactiveq);
		uvm.pggroups[i].pgrp_gs = dst_gs;
	}

	const size_t old_npggroups = VM_NPGGROUP(old_ncolors);
	for (size_t i = 0; i < old_npggroups; i++, src_gs++) {
		struct vm_page *pg;
		KDASSERT(src_gs->gs_inactive == clock_pglist_count(&src_gs->gs_inactiveq));
		while ((pg = TAILQ_FIRST(&src_gs->gs_inactiveq)) != NULL) {
			u_int pggroup = VM_PAGE_TO_PGGROUP(pg, uvmexp.ncolors);
			struct uvmpdpol_groupstate * const xgs = &gs[pggroup];

			/* unlink from the old queue before moving the page */
			TAILQ_REMOVE(&src_gs->gs_inactiveq, pg, pageq.queue);
			TAILQ_INSERT_TAIL(&xgs->gs_inactiveq, pg, pageq.queue);
			src_gs->gs_inactive--;
			xgs->gs_inactive++;
			uvm.pggroups[pggroup].pgrp_inactive++;
			KDASSERT(xgs->gs_inactive == clock_pglist_count(&xgs->gs_inactiveq));
		}
		KASSERT(src_gs->gs_inactive == 0);

		KDASSERT(src_gs->gs_active == clock_pglist_count(&src_gs->gs_activeq));
		while ((pg = TAILQ_FIRST(&src_gs->gs_activeq)) != NULL) {
			u_int pggroup = VM_PAGE_TO_PGGROUP(pg, uvmexp.ncolors);
			struct uvmpdpol_groupstate * const xgs = &gs[pggroup];

			/* unlink from the old queue before moving the page */
			TAILQ_REMOVE(&src_gs->gs_activeq, pg, pageq.queue);
			TAILQ_INSERT_TAIL(&xgs->gs_activeq, pg, pageq.queue);
			src_gs->gs_active--;
			xgs->gs_active++;
			KDASSERT(xgs->gs_active == clock_pglist_count(&xgs->gs_activeq));
			uvm.pggroups[pggroup].pgrp_active++;
		}
		KASSERT(src_gs->gs_active == 0);
	}

	struct uvm_pggroup *grp;
	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		clock_tune(grp);
		uvmpdpol_scaninit(grp);
	}
}

#if !defined(PDSIM)

#include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */

void
uvmpdpol_sysctlsetup(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	uvm_pctparam_createsysctlnode(&s->s_anonmin, "anonmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemin, "filemin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmin, "execmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached executable data"));

	uvm_pctparam_createsysctlnode(&s->s_anonmax, "anonmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for "
	    "anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemax, "filemax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmax, "execmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "executable data"));

	uvm_pctparam_createsysctlnode(&s->s_inactivepct, "inactivepct",
	    SYSCTL_DESCR("Percentage of inactive queue of "
	    "the entire (active + inactive) queue"));
}

#endif /* !defined(PDSIM) */

#if defined(PDSIM)
void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	/* XXX */
#endif /* defined(DEBUG) */
}
#endif /* defined(PDSIM) */