/*	$NetBSD: uvm_pdpolicy_clock.c,v 1.12.16.3 2012/02/12 07:30:25 matt Exp $	*/
/*	NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c	8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#if defined(PDSIM)

#include "pdsim.h"

#else /* defined(PDSIM) */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.12.16.3 2012/02/12 07:30:25 matt Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pdpolicy_impl.h>

#endif /* defined(PDSIM) */

#define	PQ_INACTIVE	PQ_PRIVATE1	/* page is in inactive list */
#define	PQ_ACTIVE	PQ_PRIVATE2	/* page is in active list */

#if !defined(CLOCK_INACTIVEPCT)
#define	CLOCK_INACTIVEPCT	33
#endif /* !defined(CLOCK_INACTIVEPCT) */

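/*
 * Per page-group policy state.  Each group keeps its own active and
 * inactive queues plus an inactive target (gs_inactarg); the embedded
 * scan state remembers where the clock hand stopped and which page
 * types the current scan should reactivate rather than reclaim.
 */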
struct uvmpdpol_scanstate {
	struct vm_page *ss_nextpg;
	bool ss_first;
	bool ss_anonreact, ss_filereact, ss_execreact;
};

struct uvmpdpol_groupstate {
	struct pglist gs_activeq;	/* allocated pages, in use */
	struct pglist gs_inactiveq;	/* pages between the clock hands */
	u_int gs_active;
	u_int gs_inactive;
	u_int gs_inactarg;
	struct uvmpdpol_scanstate gs_scanstate;
};

struct uvmpdpol_globalstate {
	struct uvmpdpol_groupstate *s_pggroups;
	struct uvm_pctparam s_anonmin;
	struct uvm_pctparam s_filemin;
	struct uvm_pctparam s_execmin;
	struct uvm_pctparam s_anonmax;
	struct uvm_pctparam s_filemax;
	struct uvm_pctparam s_execmax;
	struct uvm_pctparam s_inactivepct;
};

static struct uvmpdpol_globalstate pdpol_state;

PDPOL_EVCNT_DEFINE(reactexec)
PDPOL_EVCNT_DEFINE(reactfile)
PDPOL_EVCNT_DEFINE(reactanon)

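/*
 * clock_pglist_count: debug helper that counts the pages on a queue,
 * used by KDASSERTs to cross-check the gs_active/gs_inactive counters
 * against the queues themselves.  Most of those checks are commented
 * out below, presumably because walking the queues on every operation
 * is too expensive.
 */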
#ifdef DEBUG
static size_t
clock_pglist_count(struct pglist *pglist)
{
	size_t count = 0;
	struct vm_page *pg;

	TAILQ_FOREACH(pg, pglist, pageq.queue) {
		count++;
	}
	return count;
}
#endif

static size_t
clock_space(void)
{

	return sizeof(struct uvmpdpol_groupstate);
}

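/*
 * clock_tune: recompute the group's inactive target as a fixed
 * percentage (s_inactivepct) of its active + inactive pages, but
 * always keep it above the group's free target.
 */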
static void
clock_tune(struct uvm_pggroup *grp)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	gs->gs_inactarg = UVM_PCTPARAM_APPLY(&s->s_inactivepct,
	    gs->gs_active + gs->gs_inactive);
	if (gs->gs_inactarg <= grp->pgrp_freetarg) {
		gs->gs_inactarg = grp->pgrp_freetarg + 1;
	}
}

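/*
 * uvmpdpol_scaninit: prepare for a scan of the group's inactive queue.
 * Based on how anon/file/exec usage compares with the configured
 * minima and maxima, decide which page types to reactivate instead of
 * reclaiming.  If file and exec pages would both be reactivated and
 * anon pages either would too or cannot be paged out (swap is full),
 * nothing could be reclaimed at all, so reactivate none of them.
 * Finally, reset the clock hand to the head of the inactive queue.
 */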
void
uvmpdpol_scaninit(struct uvm_pggroup *grp)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
	struct uvmpdpol_scanstate * const ss = &gs->gs_scanstate;
	bool anonunder, fileunder, execunder;
	bool anonover, fileover, execover;
	bool anonreact, filereact, execreact;

	/*
	 * decide which types of pages we want to reactivate instead of freeing
	 * to keep usage within the minimum and maximum usage limits.
	 */

	u_int t = gs->gs_active + gs->gs_inactive + grp->pgrp_free;
	anonunder = grp->pgrp_anonpages <= UVM_PCTPARAM_APPLY(&s->s_anonmin, t);
	fileunder = grp->pgrp_filepages <= UVM_PCTPARAM_APPLY(&s->s_filemin, t);
	execunder = grp->pgrp_execpages <= UVM_PCTPARAM_APPLY(&s->s_execmin, t);
	anonover = grp->pgrp_anonpages > UVM_PCTPARAM_APPLY(&s->s_anonmax, t);
	fileover = grp->pgrp_filepages > UVM_PCTPARAM_APPLY(&s->s_filemax, t);
	execover = grp->pgrp_execpages > UVM_PCTPARAM_APPLY(&s->s_execmax, t);
	anonreact = anonunder || (!anonover && (fileover || execover));
	filereact = fileunder || (!fileover && (anonover || execover));
	execreact = execunder || (!execover && (anonover || fileover));
	if (filereact && execreact && (anonreact || uvm_swapisfull())) {
		anonreact = filereact = execreact = false;
	}
	ss->ss_anonreact = anonreact;
	ss->ss_filereact = filereact;
	ss->ss_execreact = execreact;

	ss->ss_first = true;
}

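/*
 * uvmpdpol_selectvictim: return the next inactive page eligible for
 * reclaim, or NULL when the queue is exhausted.  Pages that have been
 * referenced, and pages whose type the current scan protects, are
 * moved back to the active queue and skipped.  Called with
 * uvm_pageqlock held.
 */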
struct vm_page *
uvmpdpol_selectvictim(struct uvm_pggroup *grp)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
	struct uvmpdpol_scanstate * const ss = &gs->gs_scanstate;
	struct vm_page *pg;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);

	KASSERT(mutex_owned(&uvm_pageqlock));

	while (/* CONSTCOND */ 1) {
		struct vm_anon *anon;
		struct uvm_object *uobj;

		//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

		if (ss->ss_first) {
			pg = TAILQ_FIRST(&gs->gs_inactiveq);
			ss->ss_first = false;
			UVMHIST_LOG(pdhist, "  select first inactive page: %p",
			    pg, 0, 0, 0);
		} else {
			pg = ss->ss_nextpg;
			if (pg != NULL && (pg->pqflags & PQ_INACTIVE) == 0) {
				pg = TAILQ_FIRST(&gs->gs_inactiveq);
			}
			UVMHIST_LOG(pdhist, "  select next inactive page: %p",
			    pg, 0, 0, 0);
		}
		if (pg == NULL) {
			break;
		}
		ss->ss_nextpg = TAILQ_NEXT(pg, pageq.queue);

		grp->pgrp_pdscans++;

		/*
		 * move referenced pages back to active queue and
		 * skip to next page.
		 */

		if (pmap_is_referenced(pg)) {
			uvmpdpol_pageactivate(pg);
			grp->pgrp_pdreact++;
			continue;
		}

		anon = pg->uanon;
		uobj = pg->uobject;

		/*
		 * enforce the minimum thresholds on different
		 * types of memory usage.  if reusing the current
		 * page would reduce that type of usage below its
		 * minimum, reactivate the page instead and move
		 * on to the next page.
		 */

		if (uobj && UVM_OBJ_IS_VTEXT(uobj) && ss->ss_execreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactexec);
			continue;
		}
		if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
		    !UVM_OBJ_IS_VTEXT(uobj) && ss->ss_filereact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactfile);
			continue;
		}
		if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && ss->ss_anonreact) {
			uvmpdpol_pageactivate(pg);
			PDPOL_EVCNT_INCR(reactanon);
			continue;
		}

		break;
	}

	return pg;
}

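/*
 * uvmpdpol_balancequeue: deactivate pages from the head of the active
 * queue until the group's inactive target is met.  While there is a
 * swap shortage, also try to free swap slots of swap-backed pages
 * encountered along the way.
 */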
void
uvmpdpol_balancequeue(struct uvm_pggroup *grp, u_int swap_shortage)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;
	struct vm_page *pg, *nextpg;

	/*
	 * we have done the scan to get free pages.  now we work on meeting
	 * our inactive target.
	 */

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	/*
	 * gs_inactive may already be at or above the target; guard the
	 * subtraction so the unsigned shortage cannot underflow.
	 */
	u_int inactive_shortage = 0;
	if (gs->gs_inactarg > gs->gs_inactive) {
		inactive_shortage = gs->gs_inactarg - gs->gs_inactive;
	}
	for (pg = TAILQ_FIRST(&gs->gs_activeq);
	     pg != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     pg = nextpg) {
		nextpg = TAILQ_NEXT(pg, pageq.queue);

		/*
		 * if there's a shortage of swap slots, try to free it.
		 */

#ifdef VMSWAP
		if (swap_shortage > 0 && (pg->pqflags & PQ_SWAPBACKED) != 0) {
			if (uvmpd_trydropswap(pg)) {
				swap_shortage--;
			}
		}
#endif

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */

		if (inactive_shortage > 0) {
			/* no need to check wire_count as pg is "active" */
			uvmpdpol_pagedeactivate(pg);
			grp->pgrp_pddeact++;
			inactive_shortage--;
		}
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
}

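/*
 * uvmpdpol_pagedeactivate: move a page to the tail of the inactive
 * queue, clearing its pmap reference bit so that the next scan can
 * tell whether it was used in the meantime.
 */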
void
uvmpdpol_pagedeactivate(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	KASSERT(mutex_owned(&uvm_pageqlock));

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&gs->gs_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(gs->gs_active > 0);
		gs->gs_active--;
		grp->pgrp_active--;
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

	if ((pg->pqflags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		pmap_clear_reference(pg);
		TAILQ_INSERT_TAIL(&gs->gs_inactiveq, pg, pageq.queue);
		pg->pqflags |= PQ_INACTIVE;
		gs->gs_inactive++;
		grp->pgrp_inactive++;
	}

	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));
}

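/*
 * uvmpdpol_pageactivate: move a page to the tail of the active queue,
 * removing it from whatever queue it was on before.
 */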
void
uvmpdpol_pageactivate(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	uvmpdpol_pagedequeue(pg);
	TAILQ_INSERT_TAIL(&gs->gs_activeq, pg, pageq.queue);
	pg->pqflags |= PQ_ACTIVE;
	gs->gs_active++;
	grp->pgrp_active++;

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
}

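/*
 * uvmpdpol_pagedequeue: take a page off the active or inactive queue
 * and update the per-group counters accordingly.
 */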
void
uvmpdpol_pagedequeue(struct vm_page *pg)
{
	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));

	if (pg->pqflags & PQ_ACTIVE) {
		KASSERT(mutex_owned(&uvm_pageqlock));
		TAILQ_REMOVE(&gs->gs_activeq, pg, pageq.queue);
		pg->pqflags &= ~PQ_ACTIVE;
		KASSERT(gs->gs_active > 0);
		gs->gs_active--;
		grp->pgrp_active--;
	}

	//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

	if (pg->pqflags & PQ_INACTIVE) {
		KASSERT(mutex_owned(&uvm_pageqlock));
		TAILQ_REMOVE(&gs->gs_inactiveq, pg, pageq.queue);
		pg->pqflags &= ~PQ_INACTIVE;
		KASSERT(gs->gs_inactive > 0);
		gs->gs_inactive--;
		grp->pgrp_inactive--;
	}

	//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));
}

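/*
 * uvmpdpol_pageenqueue: a newly managed page enters the policy as an
 * active page.
 */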
void
uvmpdpol_pageenqueue(struct vm_page *pg)
{

	uvmpdpol_pageactivate(pg);
}

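/*
 * uvmpdpol_anfree: nothing to do for this policy; it keeps no
 * per-anon state.
 */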
void
uvmpdpol_anfree(struct vm_anon *an)
{
}

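/*
 * uvmpdpol_pageisqueued_p: true iff the page is on the active or
 * inactive queue.
 */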
bool
uvmpdpol_pageisqueued_p(struct vm_page *pg)
{

	return (pg->pqflags & (PQ_ACTIVE | PQ_INACTIVE)) != 0;
}

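/*
 * uvmpdpol_estimatepageable: sum the active and inactive page counts
 * over all page groups.  Either output pointer may be NULL.
 */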
void
uvmpdpol_estimatepageable(u_int *activep, u_int *inactivep)
{
	struct uvm_pggroup *grp;
	u_int active = 0, inactive = 0;

	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

		//KDASSERT(gs->gs_active == clock_pglist_count(&gs->gs_activeq));
		//KDASSERT(gs->gs_inactive == clock_pglist_count(&gs->gs_inactiveq));

		active += gs->gs_active;
		inactive += gs->gs_inactive;
	}

	if (activep) {
		*activep = active;
	}
	if (inactivep) {
		*inactivep = inactive;
	}
}

#if !defined(PDSIM)
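/*
 * min_check: sysctl validator for the anon/file/exec minima; reject
 * a new value if the three minima would sum to more than 95%.
 */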
static int
min_check(struct uvm_pctparam *pct, int t)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	u_int total = t;

	if (pct != &s->s_anonmin) {
		total += uvm_pctparam_get(&s->s_anonmin);
	}
	if (pct != &s->s_filemin) {
		total += uvm_pctparam_get(&s->s_filemin);
	}
	if (pct != &s->s_execmin) {
		total += uvm_pctparam_get(&s->s_execmin);
	}
	if (total > 95) {
		return EINVAL;
	}
	return 0;
}
#endif /* !defined(PDSIM) */

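/*
 * uvmpdpol_init: adopt the caller-allocated per-group state (one
 * uvmpdpol_groupstate per page group, see uvmpdpol_space()), set up
 * the queues and tunable percentages, and prime each group's scan
 * state.
 */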
void
uvmpdpol_init(void *new_gs, size_t npggroups)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate *gs = new_gs;

	s->s_pggroups = gs;

	struct uvm_pggroup *grp = uvm.pggroups;
	for (size_t pggroup = 0; pggroup < npggroups; pggroup++, gs++, grp++) {
		TAILQ_INIT(&gs->gs_activeq);
		TAILQ_INIT(&gs->gs_inactiveq);
		grp->pgrp_gs = gs;
		KASSERT(gs->gs_active == 0);
		KASSERT(gs->gs_inactive == 0);
		KASSERT(grp->pgrp_active == 0);
		KASSERT(grp->pgrp_inactive == 0);
	}
	uvm_pctparam_init(&s->s_inactivepct, CLOCK_INACTIVEPCT, NULL);
	uvm_pctparam_init(&s->s_anonmin, 10, min_check);
	uvm_pctparam_init(&s->s_filemin, 10, min_check);
	uvm_pctparam_init(&s->s_execmin, 5, min_check);
	uvm_pctparam_init(&s->s_anonmax, 80, NULL);
	uvm_pctparam_init(&s->s_filemax, 50, NULL);
	uvm_pctparam_init(&s->s_execmax, 30, NULL);

	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		uvmpdpol_scaninit(grp);
	}
}

void
uvmpdpol_reinit(void)
{
}

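/*
 * uvmpdpol_needsscan_p: true if the group has fewer inactive pages
 * than its target, i.e. the pagedaemon should scan this group.
 */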
bool
uvmpdpol_needsscan_p(struct uvm_pggroup *grp)
{
	struct uvmpdpol_groupstate * const gs = grp->pgrp_gs;

	return gs->gs_inactive < gs->gs_inactarg;
}

void
uvmpdpol_tune(struct uvm_pggroup *grp)
{

	clock_tune(grp);
}

size_t
uvmpdpol_space(void)
{

	return clock_space();
}

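/*
 * uvmpdpol_recolor: the number of page colors (and hence of page
 * groups) has changed.  Move every page from the old per-group queues
 * onto the queue of the group it now belongs to, then retune and
 * restart the scan for each group.  new_gs is the caller-allocated
 * replacement group state.
 */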
void
uvmpdpol_recolor(void *new_gs, struct uvm_pggroup *grparray,
    size_t npggroups, size_t old_ncolors)
{
	struct uvmpdpol_globalstate * const s = &pdpol_state;
	struct uvmpdpol_groupstate *src_gs = s->s_pggroups;
	struct uvmpdpol_groupstate * const gs = new_gs;

	s->s_pggroups = gs;

	for (size_t i = 0; i < npggroups; i++) {
		struct uvmpdpol_groupstate * const dst_gs = &gs[i];
		TAILQ_INIT(&dst_gs->gs_activeq);
		TAILQ_INIT(&dst_gs->gs_inactiveq);
		uvm.pggroups[i].pgrp_gs = dst_gs;
	}

	const size_t old_npggroups = VM_NPGGROUP(old_ncolors);
	for (size_t i = 0; i < old_npggroups; i++, src_gs++) {
		struct vm_page *pg;

		KDASSERT(src_gs->gs_inactive ==
		    clock_pglist_count(&src_gs->gs_inactiveq));
		while ((pg = TAILQ_FIRST(&src_gs->gs_inactiveq)) != NULL) {
			u_int pggroup = VM_PAGE_TO_PGGROUP(pg, uvmexp.ncolors);
			struct uvmpdpol_groupstate * const xgs = &gs[pggroup];

			/* unlink from the old queue before requeueing */
			TAILQ_REMOVE(&src_gs->gs_inactiveq, pg, pageq.queue);
			TAILQ_INSERT_TAIL(&xgs->gs_inactiveq, pg, pageq.queue);
			src_gs->gs_inactive--;
			xgs->gs_inactive++;
			uvm.pggroups[pggroup].pgrp_inactive++;
			KDASSERT(xgs->gs_inactive ==
			    clock_pglist_count(&xgs->gs_inactiveq));
		}
		KASSERT(src_gs->gs_inactive == 0);

		KDASSERT(src_gs->gs_active ==
		    clock_pglist_count(&src_gs->gs_activeq));
		while ((pg = TAILQ_FIRST(&src_gs->gs_activeq)) != NULL) {
			u_int pggroup = VM_PAGE_TO_PGGROUP(pg, uvmexp.ncolors);
			struct uvmpdpol_groupstate * const xgs = &gs[pggroup];

			/* unlink from the old queue before requeueing */
			TAILQ_REMOVE(&src_gs->gs_activeq, pg, pageq.queue);
			TAILQ_INSERT_TAIL(&xgs->gs_activeq, pg, pageq.queue);
			src_gs->gs_active--;
			xgs->gs_active++;
			uvm.pggroups[pggroup].pgrp_active++;
			KDASSERT(xgs->gs_active ==
			    clock_pglist_count(&xgs->gs_activeq));
		}
		KASSERT(src_gs->gs_active == 0);
	}

	struct uvm_pggroup *grp;
	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
		clock_tune(grp);
		uvmpdpol_scaninit(grp);
	}
}

#if !defined(PDSIM)

#include <sys/sysctl.h>	/* XXX SYSCTL_DESCR */

void
uvmpdpol_sysctlsetup(void)
{
	struct uvmpdpol_globalstate *s = &pdpol_state;

	uvm_pctparam_createsysctlnode(&s->s_anonmin, "anonmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
		"for anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemin, "filemin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
		"for cached file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmin, "execmin",
	    SYSCTL_DESCR("Percentage of physical memory reserved "
		"for cached executable data"));

	uvm_pctparam_createsysctlnode(&s->s_anonmax, "anonmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
		"be reclaimed from other usage for "
		"anonymous application data"));
	uvm_pctparam_createsysctlnode(&s->s_filemax, "filemax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
		"be reclaimed from other usage for cached "
		"file data"));
	uvm_pctparam_createsysctlnode(&s->s_execmax, "execmax",
	    SYSCTL_DESCR("Percentage of physical memory which will "
		"be reclaimed from other usage for cached "
		"executable data"));

	uvm_pctparam_createsysctlnode(&s->s_inactivepct, "inactivepct",
	    SYSCTL_DESCR("Percentage of inactive queue of "
		"the entire (active + inactive) queue"));
}

#endif /* !defined(PDSIM) */

#if defined(PDSIM)
void
pdsim_dump(const char *id)
{
#if defined(DEBUG)
	/* XXX */
#endif /* defined(DEBUG) */
}
#endif /* defined(PDSIM) */