/*	$NetBSD: uvm_pdaemon.c,v 1.29.2.6 2001/10/08 20:11:57 nathanw Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vm_pageout.c	8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define UVMPD_NUMDIRTYREACTS 16


/*
 * local prototypes
 */

void		uvmpd_scan __P((void));
boolean_t	uvmpd_scan_inactive __P((struct pglist *));
void		uvmpd_tune __P((void));

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(wmsg)
	const char *wmsg;
{
	int timo = 0;
	int s = splbio();

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curproc == uvm.pagedaemon_proc && uvmexp.paging == 0) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.  but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *	[1] panic now
		 *	[2] put a timeout on the sleep, thus causing the
		 *	    pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
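		/*
		 * added note: hz >> 3 is hz/8, i.e. roughly an eighth
		 * of a second regardless of the clock rate (e.g. 12
		 * ticks at the common hz = 100).
		 */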
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	simple_lock(&uvm.pagedaemon_lock);
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
	    timo);

	splx(s);
}
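
/*
 * illustrative note (not from the original source): a typical caller
 * is an allocation loop that retries once the daemon has freed some
 * memory, roughly:
 *
 *	while ((pg = uvm_pagealloc(obj, off, NULL, 0)) == NULL)
 *		uvm_wait("pgwait");	(the wait message is hypothetical)
 */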


/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

void
uvmpd_tune(void)
{
	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	uvmexp.freemin = uvmexp.npages / 20;

	/* between 16k and 256k */
	/* XXX: what are these values good for? */
	uvmexp.freemin = MAX(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
	uvmexp.freemin = MIN(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);

	/* Make sure there's always a user page free. */
	if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
		uvmexp.freemin = uvmexp.reserve_kernel + 1;

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;
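
	/*
	 * worked example (illustrative, assuming 4KB pages, i.e.
	 * PAGE_SHIFT = 12): with 128MB of RAM, npages = 32768 and
	 * npages / 20 = 1638, which the clamp above cuts down to
	 * 256KB worth of pages, i.e. freemin = 64; freetarg is then
	 * (64 * 4) / 3 = 85 pages.
	 */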

	/* uvmexp.inactarg: computed in main daemon loop */

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	    uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	int npages = 0;
	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_proc = curproc;
	uvm_lock_pageq();
	npages = uvmexp.npages;
	uvmpd_tune();
	uvm_unlock_pageq();

	/*
	 * main loop
	 */

	for (;;) {
		simple_lock(&uvm.pagedaemon_lock);

		UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
		    &uvm.pagedaemon_lock, FALSE, "pgdaemon", 0);
		uvmexp.pdwoke++;
		UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

		/*
		 * now lock page queues and recompute inactive count
		 */

		uvm_lock_pageq();
		if (npages != uvmexp.npages) {	/* check for new pages? */
			npages = uvmexp.npages;
			uvmpd_tune();
		}

		uvmexp.inactarg = (uvmexp.active + uvmexp.inactive) / 3;
		if (uvmexp.inactarg <= uvmexp.freetarg) {
			uvmexp.inactarg = uvmexp.freetarg + 1;
		}
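
		/*
		 * illustrative example: with 3000 active and 600
		 * inactive pages, inactarg becomes (3000 + 600) / 3 =
		 * 1200, i.e. the daemon tries to keep about a third
		 * of the queued pages on the inactive queue.
		 */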

		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d, inact/itarg=%d/%d",
		    uvmexp.free, uvmexp.freetarg, uvmexp.inactive,
		    uvmexp.inactarg);

		/*
		 * scan if needed
		 */

		if (uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
		    uvmexp.inactive < uvmexp.inactarg) {
			uvmpd_scan();
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */

		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
		}

		/*
		 * scan done.  unlock page queues (the only lock we are holding)
		 */

		uvm_unlock_pageq();

		/*
		 * drain pool resources now that we're not holding any locks
		 */

		pool_drain(0);
	}
	/*NOTREACHED*/
}


/*
 * uvm_aiodone_daemon: main loop for the aiodone daemon.
 */

void
uvm_aiodone_daemon(void *arg)
{
	int s, free;
	struct buf *bp, *nbp;
	UVMHIST_FUNC("uvm_aiodoned"); UVMHIST_CALLED(pdhist);

	for (;;) {

		/*
		 * carefully attempt to go to sleep (without losing "wakeups"!).
		 * we need splbio because we want to make sure the aio_done list
		 * is totally empty before we go to sleep.
		 */
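		/*
		 * added note: the list is checked below while holding
		 * aiodoned_lock, and UVM_UNLOCK_AND_WAIT releases that
		 * lock and sleeps atomically, so a wakeup posted
		 * between the check and the sleep cannot be lost.
		 */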

		s = splbio();
		simple_lock(&uvm.aiodoned_lock);
		if (TAILQ_FIRST(&uvm.aio_done) == NULL) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.aiodoned,
			    &uvm.aiodoned_lock, FALSE, "aiodoned", 0);
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

			/* relock aiodoned_lock, still at splbio */
			simple_lock(&uvm.aiodoned_lock);
		}

		/*
		 * check for done aio structures
		 */

		bp = TAILQ_FIRST(&uvm.aio_done);
		if (bp) {
			TAILQ_INIT(&uvm.aio_done);
		}

		simple_unlock(&uvm.aiodoned_lock);
		splx(s);

		/*
		 * process each i/o that's done.
		 */

		free = uvmexp.free;
		while (bp != NULL) {
			nbp = TAILQ_NEXT(bp, b_freelist);
			(*bp->b_iodone)(bp);
			bp = nbp;
		}
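
		/*
		 * added note: for pageout i/os the b_iodone hook is
		 * expected to be uvm_aio_aiodone(), which unbusies or
		 * frees the pages and decrements uvmexp.paging; this
		 * loop just runs whatever completions were queued.
		 */
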
		if (free <= uvmexp.reserve_kernel) {
			s = uvm_lock_fpageq();
			wakeup(&uvm.pagedaemon);
			uvm_unlock_fpageq(s);
		} else {
			simple_lock(&uvm.pagedaemon_lock);
			wakeup(&uvmexp.free);
			simple_unlock(&uvm.pagedaemon_lock);
		}
	}
}

/*
 * uvmpd_scan_inactive: scan an inactive list for pages to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 * => we return TRUE if we are exiting because we met our target
 */

boolean_t
uvmpd_scan_inactive(pglst)
	struct pglist *pglst;
{
	boolean_t retval = FALSE;	/* assume we haven't hit target */
	int error;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	struct vm_anon *anon;
	struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT];
	struct simplelock *slock;
	int swnpages, swcpages;
	int swslot;
	int dirtyreacts, t, result;
	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);

	/*
	 * swslot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */

	swslot = 0;
	swnpages = swcpages = 0;
	dirtyreacts = 0;
	for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
		uobj = NULL;
		anon = NULL;
		if (p) {

			/*
			 * see if we've met the free target.
			 */

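			/*
			 * added note: "freetarg << 2" gives the scan
			 * hysteresis: we keep cleaning until free +
			 * paging reaches four times the free target,
			 * building up a cushion rather than stopping
			 * right at the edge.
			 */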
			if (uvmexp.free + uvmexp.paging >=
			    uvmexp.freetarg << 2 ||
			    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
				UVMHIST_LOG(pdhist,"  met free target: "
				    "exit loop", 0, 0, 0, 0);
				retval = TRUE;

				if (swslot == 0) {
					/* exit now if no swap-i/o pending */
					break;
				}

				/* set p to null to signal final swap i/o */
				p = NULL;
				nextpg = NULL;
			}
		}
		if (p) {	/* if (we have a new page to consider) */

			/*
			 * we are below target and have a new page to consider.
			 */

			uvmexp.pdscans++;
			nextpg = TAILQ_NEXT(p, pageq);

			/*
			 * move referenced pages back to active queue and
			 * skip to next page.
			 */

			if (pmap_clear_reference(p)) {
				uvm_pageactivate(p);
				uvmexp.pdreact++;
				continue;
			}
			anon = p->uanon;
			uobj = p->uobject;

			/*
			 * enforce the minimum thresholds on different
			 * types of memory usage.  if reusing the current
			 * page would reduce that type of usage below its
			 * minimum, reactivate the page instead and move
			 * on to the next page.
			 */
434 1.29.2.1 nathanw
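			/*
			 * illustrative note: the *min values are
			 * pre-scaled fractions of 256, so
			 * (t * uvmexp.anonmin) >> 8 is anonmin/256
			 * of all managed pages; e.g. a 10% floor is
			 * stored as about 25, giving t * 25 / 256,
			 * or roughly a tenth of t.
			 */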
			t = uvmexp.active + uvmexp.inactive + uvmexp.free;
			if (anon &&
			    uvmexp.anonpages <= (t * uvmexp.anonmin) >> 8) {
				uvm_pageactivate(p);
				uvmexp.pdreanon++;
				continue;
			}
			if (uobj && UVM_OBJ_IS_VTEXT(uobj) &&
			    uvmexp.vtextpages <= (t * uvmexp.vtextmin) >> 8) {
				uvm_pageactivate(p);
				uvmexp.pdrevtext++;
				continue;
			}
			if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
			    !UVM_OBJ_IS_VTEXT(uobj) &&
			    uvmexp.vnodepages <= (t * uvmexp.vnodemin) >> 8) {
				uvm_pageactivate(p);
				uvmexp.pdrevnode++;
				continue;
			}

			/*
			 * first we attempt to lock the object that this page
			 * belongs to.  if our attempt fails we skip on to
			 * the next page (no harm done).  it is important to
			 * "try" locking the object as we are locking in the
			 * wrong order (pageq -> object) and we don't want to
			 * deadlock.
			 *
			 * the only time we expect to see an ownerless page
			 * (i.e. a page with no uobject and !PQ_ANON) is if an
			 * anon has loaned a page from a uvm_object and the
			 * uvm_object has dropped the ownership.  in that
			 * case, the anon can "take over" the loaned page
			 * and make it its own.
			 */
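			/*
			 * added note: the rest of UVM locks in the
			 * opposite (correct) order, object/anon first
			 * and then pageq, so a blocking lock here
			 * could deadlock against e.g. the fault path;
			 * simple_lock_try sidesteps that at the cost
			 * of occasionally skipping a page.
			 */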

			/* is page part of an anon or ownerless ? */
			if ((p->pqflags & PQ_ANON) || uobj == NULL) {
				KASSERT(anon != NULL);
				slock = &anon->an_lock;
				if (!simple_lock_try(slock)) {
					/* lock failed, skip this page */
					continue;
				}

				/*
				 * if the page is ownerless, claim it in the
				 * name of "anon"!
				 */

				if ((p->pqflags & PQ_ANON) == 0) {
					KASSERT(p->loan_count > 0);
					p->loan_count--;
					p->pqflags |= PQ_ANON;
					/* anon now owns it */
				}
				if (p->flags & PG_BUSY) {
					simple_unlock(slock);
					uvmexp.pdbusy++;
					continue;
				}
				uvmexp.pdanscan++;
			} else {
				KASSERT(uobj != NULL);
				slock = &uobj->vmobjlock;
				if (!simple_lock_try(slock)) {
					continue;
				}
				if (p->flags & PG_BUSY) {
					simple_unlock(slock);
					uvmexp.pdbusy++;
					continue;
				}
				uvmexp.pdobscan++;
			}

			/*
			 * we now have the object and the page queues locked.
			 * if the page is not swap-backed, call the object's
			 * pager to flush and free the page.
			 */

			if ((p->pqflags & PQ_SWAPBACKED) == 0) {
				uvm_unlock_pageq();
				error = (uobj->pgops->pgo_put)(uobj, p->offset,
				    p->offset + PAGE_SIZE,
				    PGO_CLEANIT|PGO_FREE);
				uvm_lock_pageq();
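				/*
				 * added note: the page queues were
				 * unlocked around the pgo_put call, so
				 * nextpg may have been moved off the
				 * inactive queue or freed; if so,
				 * restart from the head of the list.
				 */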
				if (nextpg &&
				    (nextpg->pqflags & PQ_INACTIVE) == 0) {
					nextpg = TAILQ_FIRST(pglst);
				}
				continue;
			}

			/*
			 * the page is swap-backed.  remove all the
			 * permissions from the page so we can sync the
			 * modified info without any race conditions.
			 * if the page is clean we can free it now
			 * and continue.
			 */

			pmap_page_protect(p, VM_PROT_NONE);
			if ((p->flags & PG_CLEAN) && pmap_clear_modify(p)) {
				p->flags &= ~(PG_CLEAN);
			}
			if (p->flags & PG_CLEAN) {
				uvm_pagefree(p);
				uvmexp.pdfreed++;

				/*
				 * for anons, we need to remove the page
				 * from the anon ourselves.  for aobjs,
				 * pagefree did that for us.
				 */

				if (anon) {
					KASSERT(anon->an_swslot != 0);
					anon->u.an_page = NULL;
				}
				simple_unlock(slock);
				continue;
			}

			/*
			 * this page is dirty, skip it if we'll have met our
			 * free target when all the current pageouts complete.
			 */

			if (uvmexp.free + uvmexp.paging >
			    uvmexp.freetarg << 2) {
				simple_unlock(slock);
				continue;
			}

			/*
			 * free any swap space allocated to the page since
			 * we'll have to write it again with its new data.
			 */

			if ((p->pqflags & PQ_ANON) && anon->an_swslot) {
				uvm_swap_free(anon->an_swslot, 1);
				anon->an_swslot = 0;
			} else if (p->pqflags & PQ_AOBJ) {
				uao_dropswap(uobj, p->offset >> PAGE_SHIFT);
			}

			/*
			 * if every swap slot belongs to a page that lives
			 * only in swap, then swap is full and we can't
			 * page out any more swap-backed pages.  reactivate
			 * this page so that we eventually cycle all pages
			 * through the inactive queue.
			 */

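			/*
			 * added note: each such reactivation bumps
			 * dirtyreacts; once it hits
			 * UVMPD_NUMDIRTYREACTS (16, see the top of
			 * this file) the check at the top of the loop
			 * ends the pass.
			 */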
			KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
			if (uvmexp.swpgonly == uvmexp.swpages) {
				dirtyreacts++;
				uvm_pageactivate(p);
				simple_unlock(slock);
				continue;
			}

			/*
			 * start new swap pageout cluster (if necessary).
			 */

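			/*
			 * illustrative note: a cluster is at most
			 * MAXBSIZE worth of pages (16 pages with
			 * 64KB MAXBSIZE and 4KB pages), and
			 * uvm_swap_alloc may return fewer contiguous
			 * slots than requested (the TRUE argument
			 * says fewer is ok).
			 */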
			if (swslot == 0) {
				swnpages = MAXBSIZE >> PAGE_SHIFT;
				swslot = uvm_swap_alloc(&swnpages, TRUE);
				if (swslot == 0) {
					simple_unlock(slock);
					continue;
				}
				swcpages = 0;
			}

			/*
			 * at this point, we're definitely going to reuse
			 * this page.  mark the page busy and delayed-free.
			 * we should remove the page from the page queues
			 * so we don't ever look at it again.
			 * adjust counters and such.
			 */

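			/*
			 * added note: PG_PAGEOUT marks the page to be
			 * freed when the swap write completes (in the
			 * aiodone path), and uvmexp.paging is the
			 * matching in-flight count that the free
			 * target checks add to uvmexp.free.
			 */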
			p->flags |= PG_BUSY;
			UVM_PAGE_OWN(p, "scan_inactive");

			p->flags |= PG_PAGEOUT;
			uvmexp.paging++;
			uvm_pagedequeue(p);

			uvmexp.pgswapout++;

			/*
			 * add the new page to the cluster.
			 */

			if (anon) {
				anon->an_swslot = swslot + swcpages;
				simple_unlock(slock);
			} else {
				result = uao_set_swslot(uobj,
				    p->offset >> PAGE_SHIFT, swslot + swcpages);
				if (result == -1) {
					p->flags &= ~(PG_BUSY|PG_PAGEOUT);
					UVM_PAGE_OWN(p, NULL);
					uvmexp.paging--;
					uvm_pageactivate(p);
					simple_unlock(slock);
					continue;
				}
				simple_unlock(slock);
			}
			swpps[swcpages] = p;
			swcpages++;

			/*
			 * if the cluster isn't full, look for more pages
			 * before starting the i/o.
			 */

			if (swcpages < swnpages) {
				continue;
			}
		}

		/*
		 * if this is the final pageout we could have a few
		 * unused swap blocks.  if so, free them now.
		 */

		if (swcpages < swnpages) {
			uvm_swap_free(swslot + swcpages, (swnpages - swcpages));
		}

		/*
		 * now start the pageout.
		 */

		uvm_unlock_pageq();
		uvmexp.pdpageouts++;
		error = uvm_swap_put(swslot, swpps, swcpages, 0);
		KASSERT(error == 0);
		uvm_lock_pageq();

		/*
		 * zero swslot to indicate that we are
		 * no longer building a swap-backed cluster.
		 */

		swslot = 0;

		/*
		 * the pageout is in progress.  bump counters and set up
		 * for the next loop.
		 */

		uvmexp.pdpending++;
		if (nextpg && (nextpg->pqflags & PQ_INACTIVE) == 0) {
			nextpg = TAILQ_FIRST(pglst);
		}
	}
	return (retval);
701 1.1 mrg }

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

void
uvmpd_scan(void)
{
	int inactive_shortage, swap_shortage, pages_freed;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	struct vm_anon *anon;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;
	uobj = NULL;
	anon = NULL;

#ifndef __SWAP_BROKEN

	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */

	if (uvmexp.free < uvmexp.freetarg && uvmexp.nswapdev != 0) {
		uvmexp.pdswout++;
		UVMHIST_LOG(pdhist,"  free %d < target %d: swapout",
		    uvmexp.free, uvmexp.freetarg, 0, 0);
		uvm_unlock_pageq();
		uvm_swapout_threads();
		uvm_lock_pageq();

	}
#endif

	/*
	 * now we want to work on meeting our targets.  first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	/*
	 * scan the inactive queue for pages to free or clean.
	 * (uvmexp.pdrevs, bumped above, counts these passes.)
	 */

	pages_freed = uvmexp.pdfreed;
	(void) uvmpd_scan_inactive(&uvm.page_inactive);
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * we have done the scan to get free pages.  now we work on meeting
	 * our inactive target.
	 */

	inactive_shortage = uvmexp.inactarg - uvmexp.inactive;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */
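	/*
	 * added note: this triggers only when every swap slot is
	 * allocated (swpginuse == swpages) but some slots still have
	 * in-core copies (swpgonly < swpages) whose swap space could
	 * be reclaimed, and the inactive scan freed nothing.
	 */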

	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse == uvmexp.swpages &&
	    uvmexp.swpgonly < uvmexp.swpages &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}

	UVMHIST_LOG(pdhist, "  loop 2: inactive_shortage=%d swap_shortage=%d",
	    inactive_shortage, swap_shortage,0,0);
	for (p = TAILQ_FIRST(&uvm.page_active);
	     p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq);
		if (p->flags & PG_BUSY) {
			continue;
		}

		/*
		 * lock the page's owner.
		 */
		/* is page anon owned or ownerless? */
		if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {
			anon = p->uanon;
			KASSERT(anon != NULL);
			if (!simple_lock_try(&anon->an_lock)) {
				continue;
			}

			/* take over the page? */
			if ((p->pqflags & PQ_ANON) == 0) {
				KASSERT(p->loan_count > 0);
				p->loan_count--;
				p->pqflags |= PQ_ANON;
			}
		} else {
			uobj = p->uobject;
			if (!simple_lock_try(&uobj->vmobjlock)) {
				continue;
			}
		}

		/*
		 * skip this page if it's busy.
		 */

		if ((p->flags & PG_BUSY) != 0) {
			if (p->pqflags & PQ_ANON)
				simple_unlock(&anon->an_lock);
			else
				simple_unlock(&uobj->vmobjlock);
			continue;
		}

		/*
		 * if there's a shortage of swap, free any swap allocated
		 * to this page so that other pages can be paged out.
		 */

		if (swap_shortage > 0) {
			if ((p->pqflags & PQ_ANON) && anon->an_swslot) {
				uvm_swap_free(anon->an_swslot, 1);
				anon->an_swslot = 0;
				p->flags &= ~PG_CLEAN;
				swap_shortage--;
			} else if (p->pqflags & PQ_AOBJ) {
				int slot = uao_set_swslot(uobj,
				    p->offset >> PAGE_SHIFT, 0);
				if (slot) {
					uvm_swap_free(slot, 1);
					p->flags &= ~PG_CLEAN;
					swap_shortage--;
				}
			}
		}

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */

		if (inactive_shortage > 0) {
			/* no need to check wire_count as pg is "active" */
			uvm_pagedeactivate(p);
			uvmexp.pddeact++;
			inactive_shortage--;
		}

		/*
		 * we're done with this page.
		 */

		if (p->pqflags & PQ_ANON)
			simple_unlock(&anon->an_lock);
		else
			simple_unlock(&uobj->vmobjlock);
	}
}