/*	$NetBSD: uvm_pdpolicy_clock.c,v 1.12.16.4 2012/02/17 23:35:31 matt Exp $	*/
/*	NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c	8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#if defined(PDSIM)

#include "pdsim.h"

#else /* defined(PDSIM) */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.12.16.4 2012/02/17 23:35:31 matt Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pdpolicy_impl.h>

#endif /* defined(PDSIM) */

#define	PQ_INACTIVE	PQ_PRIVATE1	/* page is in inactive list */
#define	PQ_ACTIVE	PQ_PRIVATE2	/* page is in active list */

#if !defined(CLOCK_INACTIVEPCT)
#define	CLOCK_INACTIVEPCT	33
#endif /* !defined(CLOCK_INACTIVEPCT) */

struct uvmpdpol_scanstate {
	struct vm_page *ss_nextpg;
	bool ss_first;
	bool ss_anonreact, ss_filereact, ss_execreact;
};

struct uvmpdpol_groupstate {
	struct pglist gs_activeq;	/* allocated pages, in use */
	struct pglist gs_inactiveq;	/* pages between the clock hands */
	u_int gs_active;
	u_int gs_inactive;
	u_int gs_inactarg;
	struct uvmpdpol_scanstate gs_scanstate;
};

struct uvmpdpol_globalstate {
	struct uvmpdpol_groupstate *s_pggroups;
	struct uvm_pctparam s_anonmin;
	struct uvm_pctparam s_filemin;
	struct uvm_pctparam s_execmin;
	struct uvm_pctparam s_anonmax;
	struct uvm_pctparam s_filemax;
	struct uvm_pctparam s_execmax;
	struct uvm_pctparam s_inactivepct;
};
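
/*
 * There is one uvmpdpol_groupstate per page group: it holds that group's
 * active and inactive clock queues, their lengths, the group's inactive
 * target and the scan state used by uvmpdpol_selectvictim().  The single
 * uvmpdpol_globalstate holds the percentage tunables shared by all groups
 * and points at the array of per-group states.
 */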

static struct uvmpdpol_globalstate pdpol_state;

PDPOL_EVCNT_DEFINE(reactexec)
PDPOL_EVCNT_DEFINE(reactfile)
PDPOL_EVCNT_DEFINE(reactanon)

#ifdef DEBUG
static size_t
clock_pglist_count(struct pglist *pglist)
{
	size_t count = 0;
	struct vm_page *pg;

	TAILQ_FOREACH(pg, pglist, pageq.queue) {
		count++;
	}
	return count;
}
#endif

static size_t
clock_space(void)
{
	return sizeof(struct uvmpdpol_groupstate);
}

static void
clock_tune(struct uvm_pggroup *grp)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	gs->gs_inactarg = UVM_PCTPARAM_APPLY(&s->s_inactivepct,
	    gs->gs_active + gs->gs_inactive);
	if (gs->gs_inactarg <= grp->pgrp_freetarg) {
		gs->gs_inactarg = grp->pgrp_freetarg + 1;
	}
}
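
/*
 * Illustrative numbers for the calculation above, assuming
 * UVM_PCTPARAM_APPLY(pct, x) yields roughly x * pct / 100: with the
 * default CLOCK_INACTIVEPCT of 33 and 3000 active+inactive pages in a
 * group, the inactive target comes out near 990 pages.  If the group's
 * free target were 1200, the target would instead be bumped to 1201 so
 * the pagedaemon always has some inactive pages to examine.
 */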

void
uvmpdpol_scaninit(struct uvm_pggroup *grp)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
	struct uvmpdpol_scanstate * const ss = &gs->gs_scanstate;
	bool anonunder, fileunder, execunder;
	bool anonover, fileover, execover;
	bool anonreact, filereact, execreact;

	/*
	 * decide which types of pages we want to reactivate instead of freeing
	 * to keep usage within the minimum and maximum usage limits.
	 */

	u_int t = gs->gs_active + gs->gs_inactive + grp->pgrp_free;
	anonunder = grp->pgrp_anonpages <= UVM_PCTPARAM_APPLY(&s->s_anonmin, t);
	fileunder = grp->pgrp_filepages <= UVM_PCTPARAM_APPLY(&s->s_filemin, t);
	execunder = grp->pgrp_execpages <= UVM_PCTPARAM_APPLY(&s->s_execmin, t);
	anonover = grp->pgrp_anonpages > UVM_PCTPARAM_APPLY(&s->s_anonmax, t);
	fileover = grp->pgrp_filepages > UVM_PCTPARAM_APPLY(&s->s_filemax, t);
	execover = grp->pgrp_execpages > UVM_PCTPARAM_APPLY(&s->s_execmax, t);
	anonreact = anonunder || (!anonover && (fileover || execover));
	filereact = fileunder || (!fileover && (anonover || execover));
	execreact = execunder || (!execover && (anonover || fileover));
	if (filereact && execreact && (anonreact || uvm_swapisfull())) {
		anonreact = filereact = execreact = false;
	}
	ss->ss_anonreact = anonreact;
	ss->ss_filereact = filereact;
	ss->ss_execreact = execreact;

	ss->ss_first = true;
}
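
/*
 * Example of the decision above: anonymous pages are flagged for
 * reactivation (ss_anonreact) either when anonymous usage is at or below
 * its minimum share of the group's memory, or when it is not over its
 * maximum while file or executable usage is over theirs.  File and
 * executable pages are handled symmetrically.  If file and executable
 * pages would both be reactivated and either anonymous pages would be
 * too, or swap is full (so anonymous pages cannot be paged out anyway),
 * all three flags are cleared so the scan can still free something.
 */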

struct vm_page *
uvmpdpol_selectvictim(struct uvm_pggroup *grp)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
	struct uvmpdpol_scanstate * const ss = &gs->gs_scanstate;
	struct vm_page *pg;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);

	KASSERT(mutex_owned(&uvm_pageqlock));

	while (/* CONSTCOND */ 1) {
		struct vm_anon *anon;
		struct uvm_object *uobj;

		//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

		if (ss->ss_first) {
			pg = TAILQ_FIRST(&gs->gs_inactiveq);
			ss->ss_first = false;
			UVMHIST_LOG(pdhist, " select first inactive page: %p",
			    pg, 0, 0, 0);
		} else {
			pg = ss->ss_nextpg;
			if (pg != NULL && (pg->pqflags & PQ_INACTIVE) == 0) {
				pg = TAILQ_FIRST(&gs->gs_inactiveq);
			}
			UVMHIST_LOG(pdhist, " select next inactive page: %p",
			    pg, 0, 0, 0);
		}
		if (pg == NULL) {
			break;
		}
		ss->ss_nextpg = TAILQ_NEXT(pg, pageq.queue);

		grp->pgrp_pdscans++;

		/*
		 * move referenced pages back to active queue and
		 * skip to next page.
		 */

		if (pmap_is_referenced(pg)) {
			uvmpdpol_pageactivate(pg);
			grp->pgrp_pdreact++;
			continue;
		}

		anon = pg->uanon;
		uobj = pg->uobject;

		/*
		 * enforce the minimum thresholds on different
		 * types of memory usage.  if reusing the current
		 * page would reduce that type of usage below its
		 * minimum, reactivate the page instead and move
		 * on to the next page.
		 */

		if (uobj && UVM_OBJ_IS_VTEXT(uobj) && ss->ss_execreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactexec);
			continue;
		}
		if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
		    !UVM_OBJ_IS_VTEXT(uobj) && ss->ss_filereact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactfile);
			continue;
		}
		if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && ss->ss_anonreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactanon);
			continue;
		}

		break;
	}

	return pg;
}
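
/*
 * Sketch of how the pagedaemon is expected to drive this interface
 * (simplified; the real loop lives in uvm_pdaemon.c):
 *
 *	uvmpdpol_scaninit(grp);
 *	while ((pg = uvmpdpol_selectvictim(grp)) != NULL) {
 *		... lock the owner, clean and free pg, and stop once
 *		... enough pages have been reclaimed
 *	}
 *
 * uvmpdpol_selectvictim() only picks the candidate and handles
 * reactivation; the caller does the actual pageout work.
 */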

void
uvmpdpol_balancequeue(struct uvm_pggroup *grp, u_int swap_shortage)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
	struct vm_page *pg, *nextpg;

	/*
	 * we have done the scan to get free pages.  now we work on meeting
	 * our inactive target.
	 */

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	/*
	 * clamp the shortage at zero: gs_inactive may already exceed
	 * gs_inactarg, and the unsigned subtraction must not wrap around.
	 */
	u_int inactive_shortage = gs->gs_inactarg > gs->gs_inactive ?
	    gs->gs_inactarg - gs->gs_inactive : 0;
	for (pg = TAILQ_FIRST(&gs->gs_activeq);
	     pg != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     pg = nextpg) {
		nextpg = TAILQ_NEXT(pg, pageq.queue);

		/*
		 * if there's a shortage of swap slots, try to free it.
		 */

#ifdef VMSWAP
		if (swap_shortage > 0 && (pg->pqflags & PQ_SWAPBACKED) != 0) {
			if (uvmpd_trydropswap(pg)) {
				swap_shortage--;
			}
		}
#endif

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */

		if (inactive_shortage > 0) {
			/* no need to check wire_count as pg is "active" */
			uvmpdpol_pagedeactivate(pg);
			grp->pgrp_pddeact++;
			inactive_shortage--;
		}
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
}
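
/*
 * For example, if a group's inactive target is 990 but only 700 of its
 * pages are currently inactive, the loop above deactivates up to 290
 * pages from the head of the active queue.  When a swap shortage is
 * reported as well, swap-backed pages encountered along the way may
 * also have their swap slots dropped (VMSWAP kernels only).
 */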

void
uvmpdpol_pagedeactivate(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	KASSERT(!(pg->pqflags & PQ_FREE));
	KASSERT(mutex_owned(&uvm_pageqlock));

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&gs->gs_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(gs->gs_active > 0);
		gs->gs_active--;
		grp->pgrp_active--;
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

	if ((pg->pqflags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		pmap_clear_reference(pg);
		TAILQ_INSERT_TAIL(&gs->gs_inactiveq, pg, pageq.queue);
		pg->pqflags |= PQ_INACTIVE;
		gs->gs_inactive++;
		grp->pgrp_inactive++;
	}

	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));
}

void
uvmpdpol_pageactivate(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	KASSERT(!(pg->pqflags & PQ_FREE));
	KASSERT(mutex_owned(&uvm_pageqlock));

	uvmpdpol_pagedequeue(pg);
	TAILQ_INSERT_TAIL(&gs->gs_activeq, pg, pageq.queue);
	pg->pqflags |= PQ_ACTIVE;
	gs->gs_active++;
	grp->pgrp_active++;

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
}

void
uvmpdpol_pagedequeue(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	KASSERT(!(pg->pqflags & PQ_FREE));
	KASSERT(mutex_owned(&uvm_pageqlock));
	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	if (pg->pqflags & PQ_ACTIVE) {
		KASSERT(mutex_owned(&uvm_pageqlock));
		TAILQ_REMOVE(&gs->gs_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(gs->gs_active > 0);
		gs->gs_active--;
		grp->pgrp_active--;
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

	if (pg->pqflags & PQ_INACTIVE) {
		KASSERT(mutex_owned(&uvm_pageqlock));
		TAILQ_REMOVE(&gs->gs_inactiveq, pg, pageq.queue);
		pg->pqflags &= ~PQ_INACTIVE;
		KASSERT(gs->gs_inactive > 0);
		gs->gs_inactive--;
		grp->pgrp_inactive--;
	}

	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));
}
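
/*
 * The three queue-manipulation routines above keep the PQ_ACTIVE and
 * PQ_INACTIVE flags, the queue membership and the per-group counters in
 * sync with each other; all of them expect the caller to hold
 * uvm_pageqlock, as the KASSERTs check.
 */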

void
uvmpdpol_pageenqueue(struct vm_page *pg)
{

	uvmpdpol_pageactivate(pg);
}

void
uvmpdpol_anfree(struct vm_anon *an)
{
}

bool
uvmpdpol_pageisqueued_p(struct vm_page *pg)
{

	return (pg->pqflags & (PQ_ACTIVE | PQ_INACTIVE)) != 0;
}

void
uvmpdpol_estimatepageable(u_int *activep, u_int *inactivep)
{
	struct uvm_pggroup *grp;
	u_int active = 0, inactive = 0;

	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

		//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
		//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

		active += gs->gs_active;
		inactive += gs->gs_inactive;
	}

	if (activep) {
		*activep = active;
	}
	if (inactivep) {
		*inactivep = inactive;
	}
}

#if !defined(PDSIM)
static int
min_check(struct uvm_pctparam *pct, int t)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	u_int total = t;

	if (pct != &s->s_anonmin) {
		total += uvm_pctparam_get(&s->s_anonmin);
	}
	if (pct != &s->s_filemin) {
		total += uvm_pctparam_get(&s->s_filemin);
	}
	if (pct != &s->s_execmin) {
		total += uvm_pctparam_get(&s->s_execmin);
	}
	if (total > 95) {
		return EINVAL;
	}
	return 0;
}
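
/*
 * Worked example with the defaults set in uvmpdpol_init() below
 * (anonmin = 10, filemin = 10, execmin = 5): a request to raise anonmin
 * to 85 is rejected because 85 + 10 + 5 = 100 exceeds the 95% cap
 * enforced here, while raising it to 80 (80 + 10 + 5 = 95) is still
 * accepted.
 */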
#endif /* !defined(PDSIM) */

void
uvmpdpol_init(void *new_gs, size_t npggroups)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate *gs = new_gs;

	s->s_pggroups = gs;

	struct uvm_pggroup *grp = uvm.pggroups;
	for (size_t pggroup = 0; pggroup < npggroups; pggroup++, gs++, grp++) {
		TAILQ_INIT(&gs->gs_activeq);
		TAILQ_INIT(&gs->gs_inactiveq);
		grp->pgrp_gs = gs;
		KASSERT(gs->gs_active == 0);
		KASSERT(gs->gs_inactive == 0);
		KASSERT(grp->pgrp_active == 0);
		KASSERT(grp->pgrp_inactive == 0);
	}
	uvm_pctparam_init(&s->s_inactivepct, CLOCK_INACTIVEPCT, NULL);
	uvm_pctparam_init(&s->s_anonmin, 10, min_check);
	uvm_pctparam_init(&s->s_filemin, 10, min_check);
	uvm_pctparam_init(&s->s_execmin, 5, min_check);
	uvm_pctparam_init(&s->s_anonmax, 80, NULL);
	uvm_pctparam_init(&s->s_filemax, 50, NULL);
	uvm_pctparam_init(&s->s_execmax, 30, NULL);

	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		uvmpdpol_scaninit(grp);
	}
}

void
uvmpdpol_reinit(void)
{
}

bool
uvmpdpol_needsscan_p(struct uvm_pggroup *grp)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	return gs->gs_inactive < gs->gs_inactarg;
}

void
uvmpdpol_tune(struct uvm_pggroup *grp)
{

	clock_tune(grp);
}

size_t
uvmpdpol_space(void)
{

	return clock_space();
}

void
uvmpdpol_recolor(void *new_gs, struct uvm_pggroup *grparray,
    size_t npggroups, size_t old_ncolors)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate *src_gs = s->s_pggroups;
	struct uvmpdpol_groupstate * const gs = new_gs;

	s->s_pggroups = gs;

	for (size_t i = 0; i < npggroups; i++) {
		struct uvmpdpol_groupstate * const dst_gs = &gs[i];

		TAILQ_INIT(&dst_gs->gs_activeq);
		TAILQ_INIT(&dst_gs->gs_inactiveq);
		uvm.pggroups[i].pgrp_gs = dst_gs;
	}

	const size_t old_npggroups = VM_NPGGROUP(old_ncolors);
	for (size_t i = 0; i < old_npggroups; i++, src_gs++) {
		struct vm_page *pg;

		KDASSERT(src_gs->gs_inactive == clock_pglist_count(&src_gs->gs_inactiveq));
		while ((pg = TAILQ_FIRST(&src_gs->gs_inactiveq)) != NULL) {
			u_int pggroup = VM_PAGE_TO_PGGROUP(pg, uvmexp.ncolors);
			struct uvmpdpol_groupstate * const xgs = &gs[pggroup];

			/* unlink from the old queue before requeueing */
			TAILQ_REMOVE(&src_gs->gs_inactiveq, pg, pageq.queue);
			TAILQ_INSERT_TAIL(&xgs->gs_inactiveq, pg, pageq.queue);
			src_gs->gs_inactive--;
			xgs->gs_inactive++;
			uvm.pggroups[pggroup].pgrp_inactive++;
			KDASSERT(xgs->gs_inactive == clock_pglist_count(&xgs->gs_inactiveq));
		}
		KASSERT(src_gs->gs_inactive == 0);

		KDASSERT(src_gs->gs_active == clock_pglist_count(&src_gs->gs_activeq));
		while ((pg = TAILQ_FIRST(&src_gs->gs_activeq)) != NULL) {
			u_int pggroup = VM_PAGE_TO_PGGROUP(pg, uvmexp.ncolors);
			struct uvmpdpol_groupstate * const xgs = &gs[pggroup];

			/* unlink from the old queue before requeueing */
			TAILQ_REMOVE(&src_gs->gs_activeq, pg, pageq.queue);
			TAILQ_INSERT_TAIL(&xgs->gs_activeq, pg, pageq.queue);
			src_gs->gs_active--;
			xgs->gs_active++;
			uvm.pggroups[pggroup].pgrp_active++;
			KDASSERT(xgs->gs_active == clock_pglist_count(&xgs->gs_activeq));
		}
		KASSERT(src_gs->gs_active == 0);
	}

	struct uvm_pggroup *grp;
	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		clock_tune(grp);
		uvmpdpol_scaninit(grp);
	}
}
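
/*
 * The recoloring above runs when the page color count changes: every
 * page still sitting on one of the old per-group queues is moved onto
 * the queue of the group it belongs to under the new color count, the
 * per-group counters are updated accordingly, and finally each group's
 * inactive target and scan state are recomputed.
 */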

#if !defined(PDSIM)

#include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */

void
uvmpdpol_sysctlsetup(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	uvm_pctparam_createsysctlnode(&s->s_anonmin, "anonmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemin, "filemin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmin, "execmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached executable data"));

	uvm_pctparam_createsysctlnode(&s->s_anonmax, "anonmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for "
	    "anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemax, "filemax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmax, "execmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "executable data"));

	uvm_pctparam_createsysctlnode(&s->s_inactivepct, "inactivepct",
	    SYSCTL_DESCR("Percentage of inactive queue of "
	    "the entire (active + inactive) queue"));
}
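
/*
 * On a stock NetBSD kernel these knobs are expected to appear as the
 * usual vm.* sysctls (vm.anonmin, vm.filemin, vm.execmin, vm.anonmax,
 * vm.filemax, vm.execmax and vm.inactivepct), assuming
 * uvm_pctparam_createsysctlnode() attaches them under the "vm" node.
 */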

#endif /* !defined(PDSIM) */

#if defined(PDSIM)
void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	/* XXX */
#endif /* defined(DEBUG) */
}
#endif /* defined(PDSIM) */