/*	$NetBSD: uvm_pdpolicy_clock.c,v 1.12.16.8 2012/04/27 20:41:09 matt Exp $	*/
/*	NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c	8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#if defined(PDSIM)

#include "pdsim.h"

#else /* defined(PDSIM) */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.12.16.8 2012/04/27 20:41:09 matt Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pdpolicy_impl.h>

#endif /* defined(PDSIM) */
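
/*
 * Clock page replacement policy.  Pages managed by this policy sit on one
 * of three per-group queues: the active queue (recently used pages), the
 * inactive queue (reclamation candidates scanned by the page daemon), and
 * the "radioactive" queue, which parks swap-backed pages while no swap
 * device is configured.  The PQ_* flags below record which queue a page
 * is currently on, and CLOCK_INACTIVEPCT is the default percentage of
 * pageable (active + inactive) memory to keep on the inactive queue.
 */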
#define	PQ_INACTIVE	PQ_PRIVATE1	/* page is in inactive list */
#define	PQ_ACTIVE	PQ_PRIVATE2	/* page is in active list */
#define	PQ_RADIOACTIVE	PQ_PRIVATE3	/* page is in radioactive list */

#if !defined(CLOCK_INACTIVEPCT)
#define	CLOCK_INACTIVEPCT	33
#endif /* !defined(CLOCK_INACTIVEPCT) */

struct uvmpdpol_scanstate {
	struct vm_page *ss_nextpg;
	bool ss_first;
	bool ss_anonreact, ss_filereact, ss_execreact;
};

struct uvmpdpol_groupstate {
	struct pglist gs_activeq;	/* allocated pages, in use */
	struct pglist gs_inactiveq;	/* pages between the clock hands */
	struct pglist gs_radioactiveq;	/* swap-backed pages parked while no
					 * swap space is configured */
	u_int gs_active;		/* number of pages on gs_activeq */
	u_int gs_radioactive;		/* number of pages on gs_radioactiveq */
	u_int gs_inactive;		/* number of pages on gs_inactiveq */
	u_int gs_inactarg;		/* target length of gs_inactiveq */
	struct uvmpdpol_scanstate gs_scanstate;
};

struct uvmpdpol_globalstate {
	struct uvmpdpol_groupstate *s_pggroups;
	struct uvm_pctparam s_anonmin;
	struct uvm_pctparam s_filemin;
	struct uvm_pctparam s_execmin;
	struct uvm_pctparam s_anonmax;
	struct uvm_pctparam s_filemax;
	struct uvm_pctparam s_execmax;
	struct uvm_pctparam s_inactivepct;
};


static struct uvmpdpol_globalstate pdpol_state;

PDPOL_EVCNT_DEFINE(reactexec)
PDPOL_EVCNT_DEFINE(reactfile)
PDPOL_EVCNT_DEFINE(reactanon)

#ifdef DEBUG
static size_t
clock_pglist_count(struct pglist *pglist)
{
	size_t count = 0;
	struct vm_page *pg;
	TAILQ_FOREACH(pg, pglist, pageq.queue) {
		count++;
	}
	return count;
}
#endif
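
/*
 * clock_space: return the amount of per-group state this policy needs;
 * the caller is expected to allocate one uvmpdpol_groupstate per page
 * group.
 */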
static size_t
clock_space(void)
{
	return sizeof(struct uvmpdpol_groupstate);
}
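
/*
 * clock_tune: recompute the group's inactive target as s_inactivepct
 * (CLOCK_INACTIVEPCT, 33% by default) of its active + inactive pages,
 * but never less than one page above the group's free target.
 * For example, assuming a group with 9000 active + inactive pages and a
 * free target of 256, the target works out to roughly 2970 inactive pages.
 */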
static void
clock_tune(struct uvm_pggroup *grp)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	gs->gs_inactarg = UVM_PCTPARAM_APPLY(&s->s_inactivepct,
	    gs->gs_active + gs->gs_inactive);
	if (gs->gs_inactarg <= grp->pgrp_freetarg) {
		gs->gs_inactarg = grp->pgrp_freetarg + 1;
	}
}
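
/*
 * uvmpdpol_scaninit: prepare a scan of the group's inactive queue.
 * Based on the current anon/file/exec usage relative to the min/max
 * sysctl knobs, decide which page types should be reactivated rather
 * than reclaimed, and reset the scan to start at the head of the
 * inactive queue.
 */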
void
uvmpdpol_scaninit(struct uvm_pggroup *grp)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
	struct uvmpdpol_scanstate * const ss = &gs->gs_scanstate;
	bool anonunder, fileunder, execunder;
	bool anonover, fileover, execover;
	bool anonreact, filereact, execreact;

	/*
	 * decide which types of pages we want to reactivate instead of freeing
	 * to keep usage within the minimum and maximum usage limits.
	 */

	u_int t = gs->gs_active + gs->gs_inactive + grp->pgrp_free;
	anonunder = grp->pgrp_anonpages <= UVM_PCTPARAM_APPLY(&s->s_anonmin, t);
	fileunder = grp->pgrp_filepages <= UVM_PCTPARAM_APPLY(&s->s_filemin, t);
	execunder = grp->pgrp_execpages <= UVM_PCTPARAM_APPLY(&s->s_execmin, t);
	anonover = grp->pgrp_anonpages > UVM_PCTPARAM_APPLY(&s->s_anonmax, t);
	fileover = grp->pgrp_filepages > UVM_PCTPARAM_APPLY(&s->s_filemax, t);
	execover = grp->pgrp_execpages > UVM_PCTPARAM_APPLY(&s->s_execmax, t);
	anonreact = anonunder || (!anonover && (fileover || execover));
	filereact = fileunder || (!fileover && (anonover || execover));
	execreact = execunder || (!execover && (anonover || fileover));
	if (filereact && execreact && (anonreact || uvm_swapisfull())) {
		anonreact = filereact = execreact = false;
	}
	ss->ss_anonreact = anonreact;
	ss->ss_filereact = filereact;
	ss->ss_execreact = execreact;

	ss->ss_first = true;
}
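
/*
 * uvmpdpol_selectvictim: return the next inactive page that may be
 * reclaimed, advancing the per-group scan state.  Pages that have been
 * referenced since they were deactivated, and pages whose type is
 * currently protected by the scan policy (see uvmpdpol_scaninit), are
 * moved back to the active queue and skipped.  Returns NULL when the
 * inactive queue is exhausted.
 *
 * Called with uvm_pageqlock held.
 */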
struct vm_page *
uvmpdpol_selectvictim(struct uvm_pggroup *grp)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
	struct uvmpdpol_scanstate * const ss = &gs->gs_scanstate;
	struct vm_page *pg;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);

	KASSERT(mutex_owned(&uvm_pageqlock));

	while (/* CONSTCOND */ 1) {
		struct vm_anon *anon;
		struct uvm_object *uobj;

		//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

		if (ss->ss_first) {
			pg = TAILQ_FIRST(&gs->gs_inactiveq);
			ss->ss_first = false;
			UVMHIST_LOG(pdhist, "  select first inactive page: %p",
			    pg, 0, 0, 0);
		} else {
			pg = ss->ss_nextpg;
			if (pg != NULL && (pg->pqflags & PQ_INACTIVE) == 0) {
				pg = TAILQ_FIRST(&gs->gs_inactiveq);
			}
			UVMHIST_LOG(pdhist, "  select next inactive page: %p",
			    pg, 0, 0, 0);
		}
		if (pg == NULL) {
			break;
		}
		ss->ss_nextpg = TAILQ_NEXT(pg, pageq.queue);

		grp->pgrp_pdscans++;

		/*
		 * move referenced pages back to active queue and
		 * skip to next page.
		 */

		if (pmap_is_referenced(pg)) {
			uvmpdpol_pageactivate(pg);
			grp->pgrp_pdreact++;
			continue;
		}

		anon = pg->uanon;
		uobj = pg->uobject;

		/*
		 * enforce the minimum thresholds on different
		 * types of memory usage.  if reusing the current
		 * page would reduce that type of usage below its
		 * minimum, reactivate the page instead and move
		 * on to the next page.
		 */

		if (uobj && UVM_OBJ_IS_VTEXT(uobj) && ss->ss_execreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactexec);
			continue;
		}
		if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
		    !UVM_OBJ_IS_VTEXT(uobj) && ss->ss_filereact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactfile);
			continue;
		}
		if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && ss->ss_anonreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactanon);
			continue;
		}

		break;
	}

	return pg;
}
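
/*
 * uvmpdpol_balancequeue: after the free-page scan, top up the inactive
 * queue to its target by deactivating pages from the head of the active
 * queue, and, if there is a swap shortage, try to drop swap slots from
 * swap-backed active pages.  If swap has become available, any pages
 * parked on the radioactive queue are moved back to the active queue
 * first.
 */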
void
uvmpdpol_balancequeue(struct uvm_pggroup *grp, u_int swap_shortage)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	struct vm_page *pg, *nextpg;

	/*
	 * we have done the scan to get free pages.  now we work on meeting
	 * our inactive target.
	 */

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	/*
	 * If swap was added, move all the pages from radioactive queue to the
	 * active queue.
	 */
#ifdef VMSWAP
	if (uvmexp.nswapdev > 0) {
		while ((pg = TAILQ_FIRST(&gs->gs_radioactiveq)) != NULL) {
			uvmpdpol_pageactivate(pg);
		}
	}
#endif

	u_int inactive_shortage = gs->gs_inactarg - gs->gs_inactive;
	for (pg = TAILQ_FIRST(&gs->gs_activeq);
	     pg != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     pg = nextpg) {
		nextpg = TAILQ_NEXT(pg, pageq.queue);

		/*
		 * if there's a shortage of swap slots, try to free it.
		 */

#ifdef VMSWAP
		if (swap_shortage > 0 && (pg->pqflags & PQ_SWAPBACKED) != 0) {
			if (uvmpd_trydropswap(pg)) {
				swap_shortage--;
			}
		}
#endif

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */
		if (inactive_shortage > 0) {
			/* no need to check wire_count as pg is "active" */
			uvmpdpol_pagedeactivate(pg);
			grp->pgrp_pddeact++;
			inactive_shortage--;
		}
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
}
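
/*
 * uvmpdpol_pagedeactivate: move a page to the tail of the inactive queue,
 * clearing its pmap reference bit so a later scan can tell whether it has
 * been touched since.  The page must not be on the radioactive queue.
 *
 * Called with uvm_pageqlock held.
 */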
void
uvmpdpol_pagedeactivate(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

#if 0
	/*
	 * If there is no swap available and the page is anonymous without
	 * a backing store, don't bother marking it INACTIVE since it would
	 * only be a "dirty reactivation".
	 */
	if (uvmexp.nswapdev < 1 && (pg->pqflags & PQ_SWAPBACKED) != 0) {
		KASSERT(pg->pqflags & PQ_RADIOACTIVE);
		return;
	}
#else
	KASSERT((pg->pqflags & PQ_RADIOACTIVE) == 0);
#endif

	KASSERT(!(pg->pqflags & PQ_FREE));
	KASSERT(mutex_owned(&uvm_pageqlock));

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&gs->gs_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(gs->gs_active > 0);
		gs->gs_active--;
		grp->pgrp_active--;
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
	//KDASSERT(gs->gs_radioactive == clock_pglist_count(&gs->gs_radioactiveq));

	if (pg->pqflags & PQ_RADIOACTIVE) {
		TAILQ_REMOVE(&gs->gs_radioactiveq, pg, pageq.queue);
		pg->pqflags &= ~PQ_RADIOACTIVE;
		KASSERT(gs->gs_radioactive > 0);
		gs->gs_radioactive--;
		grp->pgrp_active--;
	}

	//KDASSERT(gs->gs_radioactive == clock_pglist_count(&gs->gs_radioactiveq));
	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

	if ((pg->pqflags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		pmap_clear_reference(pg);
		TAILQ_INSERT_TAIL(&gs->gs_inactiveq, pg, pageq.queue);
		pg->pqflags |= PQ_INACTIVE;
		gs->gs_inactive++;
		grp->pgrp_inactive++;
	}

	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));
}
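
/*
 * uvmpdpol_pageactivate: dequeue the page and put it at the tail of the
 * active queue, or of the radioactive queue if it is swap-backed and no
 * swap device is configured.
 *
 * Called with uvm_pageqlock held.
 */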
void
uvmpdpol_pageactivate(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	KASSERT(!(pg->pqflags & PQ_FREE));
	KASSERT(mutex_owned(&uvm_pageqlock));

	uvmpdpol_pagedequeue(pg);
	if (uvmexp.nswapdev < 1 && (pg->pqflags & PQ_SWAPBACKED) != 0) {
		TAILQ_INSERT_TAIL(&gs->gs_radioactiveq, pg, pageq.queue);
		pg->pqflags |= PQ_RADIOACTIVE;
		gs->gs_radioactive++;
	} else {
		TAILQ_INSERT_TAIL(&gs->gs_activeq, pg, pageq.queue);
		pg->pqflags |= PQ_ACTIVE;
		gs->gs_active++;
		grp->pgrp_active++;
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
}
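
/*
 * uvmpdpol_pagedequeue: remove a page from whichever policy queue it is
 * on (radioactive, active or inactive) and update the corresponding
 * counters.
 *
 * Called with uvm_pageqlock held.
 */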
void
uvmpdpol_pagedequeue(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	KASSERT(!(pg->pqflags & PQ_FREE));
	KASSERT(mutex_owned(&uvm_pageqlock));
	//KDASSERT(gs->gs_radioactive == clock_pglist_count(&gs->gs_radioactiveq));

	if (pg->pqflags & PQ_RADIOACTIVE) {
		TAILQ_REMOVE(&gs->gs_radioactiveq, pg, pageq.queue);
		pg->pqflags &= ~PQ_RADIOACTIVE;
		KASSERT(gs->gs_radioactive > 0);
		gs->gs_radioactive--;
	}

	//KDASSERT(gs->gs_radioactive == clock_pglist_count(&gs->gs_radioactiveq));
	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&gs->gs_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(gs->gs_active > 0);
		gs->gs_active--;
		grp->pgrp_active--;
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

	if (pg->pqflags & PQ_INACTIVE) {
		TAILQ_REMOVE(&gs->gs_inactiveq, pg, pageq.queue);
		pg->pqflags &= ~PQ_INACTIVE;
		KASSERT(gs->gs_inactive > 0);
		gs->gs_inactive--;
		grp->pgrp_inactive--;
	}

	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));
}
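
/*
 * uvmpdpol_pageenqueue: a page newly entered into the policy starts out
 * on the active queue.
 */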
void
uvmpdpol_pageenqueue(struct vm_page *pg)
{

	uvmpdpol_pageactivate(pg);
}
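
/*
 * uvmpdpol_anfree: the clock policy keeps no per-anon state, so there is
 * nothing to do when an anon is freed.
 */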
void
uvmpdpol_anfree(struct vm_anon *an)
{
}
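
/*
 * uvmpdpol_pageisqueued_p: true if the page is on any of the policy
 * queues.
 */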
bool
uvmpdpol_pageisqueued_p(struct vm_page *pg)
{

	return (pg->pqflags & (PQ_RADIOACTIVE | PQ_ACTIVE | PQ_INACTIVE)) != 0;
}
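
/*
 * uvmpdpol_estimatepageable: report the number of active (including
 * radioactive) and inactive pages, either for a single page group or,
 * if grp is NULL, summed over all groups.
 */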
void
uvmpdpol_estimatepageable(const struct uvm_pggroup *grp,
    u_int *activep, u_int *inactivep)
{
	if (grp != NULL) {
		struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
		if (activep) {
			*activep = gs->gs_active + gs->gs_radioactive;
		}
		if (inactivep) {
			*inactivep = gs->gs_inactive;
		}
		return;
	}

	u_int active = 0, inactive = 0;
	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

		//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
		//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

		active += gs->gs_active + gs->gs_radioactive;
		inactive += gs->gs_inactive;
	}

	if (activep) {
		*activep = active;
	}
	if (inactivep) {
		*inactivep = inactive;
	}
}

#if !defined(PDSIM)
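/*
 * min_check: sysctl bounds check for the anonmin/filemin/execmin knobs;
 * reject a new value if the three minimums would add up to more than 95%.
 */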
static int
min_check(struct uvm_pctparam *pct, int t)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	u_int total = t;

	if (pct != &s->s_anonmin) {
		total += uvm_pctparam_get(&s->s_anonmin);
	}
	if (pct != &s->s_filemin) {
		total += uvm_pctparam_get(&s->s_filemin);
	}
	if (pct != &s->s_execmin) {
		total += uvm_pctparam_get(&s->s_execmin);
	}
	if (total > 95) {
		return EINVAL;
	}
	return 0;
}
#endif /* !defined(PDSIM) */
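
/*
 * uvmpdpol_init: attach a uvmpdpol_groupstate (from the caller-supplied
 * array) to each page group, initialise the three queues, set the default
 * percentage parameters, and prime the scan state of every group.
 */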
void
uvmpdpol_init(void *new_gs, size_t npggroups)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate *gs = new_gs;

	s->s_pggroups = gs;

	struct uvm_pggroup *grp = uvm.pggroups;
	for (size_t pggroup = 0; pggroup < npggroups; pggroup++, gs++, grp++) {
		TAILQ_INIT(&gs->gs_activeq);
		TAILQ_INIT(&gs->gs_inactiveq);
		TAILQ_INIT(&gs->gs_radioactiveq);
		grp->pgrp_gs = gs;
		KASSERT(gs->gs_active == 0);
		KASSERT(gs->gs_inactive == 0);
		KASSERT(gs->gs_radioactive == 0);
		KASSERT(grp->pgrp_active == 0);
		KASSERT(grp->pgrp_inactive == 0);
	}
	uvm_pctparam_init(&s->s_inactivepct, CLOCK_INACTIVEPCT, NULL);
	uvm_pctparam_init(&s->s_anonmin, 10, min_check);
	uvm_pctparam_init(&s->s_filemin, 10, min_check);
	uvm_pctparam_init(&s->s_execmin, 5, min_check);
	uvm_pctparam_init(&s->s_anonmax, 80, NULL);
	uvm_pctparam_init(&s->s_filemax, 50, NULL);
	uvm_pctparam_init(&s->s_execmax, 30, NULL);

	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		uvmpdpol_scaninit(grp);
	}
}

void
uvmpdpol_reinit(void)
{
}
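
/*
 * uvmpdpol_needsscan_p: true if the group is worth scanning, i.e. it has
 * a non-zero free-page minimum and its inactive queue is below target.
 */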
bool
uvmpdpol_needsscan_p(struct uvm_pggroup *grp)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	return grp->pgrp_freemin > 0 && gs->gs_inactive < gs->gs_inactarg;
}

void
uvmpdpol_tune(struct uvm_pggroup *grp)
{

	clock_tune(grp);
}

size_t
uvmpdpol_space(void)
{

	return clock_space();
}
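
/*
 * uvmpdpol_recolor: the number of page colors (and hence page groups) has
 * changed.  Re-home every page from the old per-group queues onto the
 * queues of the group it now belongs to, then retune and reinitialise the
 * scan state of each new group.
 */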
void
uvmpdpol_recolor(void *new_gs, struct uvm_pggroup *grparray,
    size_t npggroups, size_t old_ncolors)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate *src_gs = s->s_pggroups;
	struct uvmpdpol_groupstate * const gs = new_gs;

	s->s_pggroups = gs;

	for (size_t i = 0; i < npggroups; i++) {
		struct uvmpdpol_groupstate * const dst_gs = &gs[i];
		TAILQ_INIT(&dst_gs->gs_activeq);
		TAILQ_INIT(&dst_gs->gs_inactiveq);
		TAILQ_INIT(&dst_gs->gs_radioactiveq);
		uvm.pggroups[i].pgrp_gs = dst_gs;
	}

	const size_t old_npggroups = VM_NPGGROUP(old_ncolors);
	for (size_t i = 0; i < old_npggroups; i++, src_gs++) {
		struct vm_page *pg;
		KDASSERT(src_gs->gs_inactive == clock_pglist_count(&src_gs->gs_inactiveq));
		while ((pg = TAILQ_FIRST(&src_gs->gs_inactiveq)) != NULL) {
			u_int pggroup = VM_PAGE_TO_PGGROUP(pg, uvmexp.ncolors);
			struct uvmpdpol_groupstate * const xgs = &gs[pggroup];

			TAILQ_REMOVE(&src_gs->gs_inactiveq, pg, pageq.queue);
			TAILQ_INSERT_TAIL(&xgs->gs_inactiveq, pg, pageq.queue);
			src_gs->gs_inactive--;
			xgs->gs_inactive++;
			uvm.pggroups[pggroup].pgrp_inactive++;
			KDASSERT(xgs->gs_inactive == clock_pglist_count(&xgs->gs_inactiveq));
		}
		KASSERT(src_gs->gs_inactive == 0);

		KDASSERT(src_gs->gs_active == clock_pglist_count(&src_gs->gs_activeq));
		while ((pg = TAILQ_FIRST(&src_gs->gs_activeq)) != NULL) {
			u_int pggroup = VM_PAGE_TO_PGGROUP(pg, uvmexp.ncolors);
			struct uvmpdpol_groupstate * const xgs = &gs[pggroup];

			TAILQ_REMOVE(&src_gs->gs_activeq, pg, pageq.queue);
			TAILQ_INSERT_TAIL(&xgs->gs_activeq, pg, pageq.queue);
			src_gs->gs_active--;
			xgs->gs_active++;
			KDASSERT(xgs->gs_active == clock_pglist_count(&xgs->gs_activeq));
			uvm.pggroups[pggroup].pgrp_active++;
		}
		KASSERT(src_gs->gs_active == 0);

		KDASSERT(src_gs->gs_radioactive == clock_pglist_count(&src_gs->gs_radioactiveq));
		while ((pg = TAILQ_FIRST(&src_gs->gs_radioactiveq)) != NULL) {
			u_int pggroup = VM_PAGE_TO_PGGROUP(pg, uvmexp.ncolors);
			struct uvmpdpol_groupstate * const xgs = &gs[pggroup];

			TAILQ_REMOVE(&src_gs->gs_radioactiveq, pg, pageq.queue);
			TAILQ_INSERT_TAIL(&xgs->gs_radioactiveq, pg, pageq.queue);
			src_gs->gs_radioactive--;
			xgs->gs_radioactive++;
			KDASSERT(xgs->gs_radioactive == clock_pglist_count(&xgs->gs_radioactiveq));
			uvm.pggroups[pggroup].pgrp_active++;
		}
		KASSERT(src_gs->gs_radioactive == 0);
	}

	struct uvm_pggroup *grp;
	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		clock_tune(grp);
		uvmpdpol_scaninit(grp);
	}
}

#if !defined(PDSIM)

#include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */

void
uvmpdpol_sysctlsetup(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	uvm_pctparam_createsysctlnode(&s->s_anonmin, "anonmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemin, "filemin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmin, "execmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
	    "for cached executable data"));

	uvm_pctparam_createsysctlnode(&s->s_anonmax, "anonmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for "
	    "anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemax, "filemax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmax, "execmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
	    "be reclaimed from other usage for cached "
	    "executable data"));

	uvm_pctparam_createsysctlnode(&s->s_inactivepct, "inactivepct",
	    SYSCTL_DESCR("Percentage of inactive queue of "
	    "the entire (active + inactive) queue"));
}

#endif /* !defined(PDSIM) */

#if defined(PDSIM)
void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	/* XXX */
#endif /* defined(DEBUG) */
}
#endif /* defined(PDSIM) */