/*	$NetBSD: uvm_pdaemon.c,v 1.24 2000/11/27 08:40:05 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c	8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>

#include <uvm/uvm.h>

extern u_long uvm_pgcnt_vnode;
extern struct uvm_pagerops uvm_vnodeops;

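/*
 * uvm_pgcnt_vnode is a count of vnode-backed pages and uvm_vnodeops is
 * the vnode pager's pagerops vector (both maintained elsewhere, by the
 * vnode pager).  the page daemon uses them below to notice when vnode
 * pages grow past roughly 13/16 of managed memory and to restrict
 * reclamation to vnode pages in that case (see uvm_pageout() and
 * uvmpd_scan_inactive()).
 */
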
/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define	UVMPD_NUMDIRTYREACTS	16


/*
 * local prototypes
 */

static void		uvmpd_scan __P((void));
static boolean_t	uvmpd_scan_inactive __P((struct pglist *));
static void		uvmpd_tune __P((void));


/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(wmsg)
	const char *wmsg;
{
	int timo = 0;
	int s = splbio();

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curproc == uvm.pagedaemon_proc) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.  but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	simple_lock(&uvm.pagedaemon_lock);
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
	    timo);

	splx(s);
}
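
/*
 * a typical use of uvm_wait() (a sketch, not code from this file):
 * a caller that cannot allocate a page waits for the pagedaemon and
 * then retries, e.g.:
 *
 *	while ((pg = uvm_pagealloc(uobj, offset, NULL, 0)) == NULL)
 *		uvm_wait("pgwait");
 *
 * here "pg", "uobj", "offset" and the wait message are the caller's own
 * names; only the retry-after-uvm_wait() pattern is the point.
 */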


/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune()
{
	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	uvmexp.freemin = uvmexp.npages / 20;

	/* between 16k and 256k */
	/* XXX:  what are these values good for? */
	uvmexp.freemin = max(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
	uvmexp.freemin = min(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);

	/* Make sure there's always a user page free. */
	if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
		uvmexp.freemin = uvmexp.reserve_kernel + 1;

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;

	/* uvmexp.inactarg: computed in main daemon loop */

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	    uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}
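
/*
 * a worked example of the tuning above (a sketch, assuming PAGE_SHIFT
 * of 12, i.e. 4KB pages, and 64MB of managed memory, npages == 16384):
 *
 *	freemin  = 16384 / 20 = 819, clamped down to (256*1024) >> 12 = 64
 *	freetarg = (64 * 4) / 3 = 85
 *	wiredmax = 16384 / 3 = 5461
 *
 * (assuming uvmexp.reserve_kernel is well below 64, the reserve check
 * leaves freemin alone.)
 */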

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	int npages = 0;
	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_proc = curproc;
	(void) spl0();
	uvm_lock_pageq();
	npages = uvmexp.npages;
	uvmpd_tune();
	uvm_unlock_pageq();

	/*
	 * main loop
	 */

	for (;;) {
		simple_lock(&uvm.pagedaemon_lock);

		UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
		    &uvm.pagedaemon_lock, FALSE, "pgdaemon", 0);
		uvmexp.pdwoke++;
		UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

		/* drain pool resources */
		pool_drain(0);

		/*
		 * now lock page queues and recompute inactive count
		 */

		uvm_lock_pageq();
		if (npages != uvmexp.npages) {	/* check for new pages? */
			npages = uvmexp.npages;
			uvmpd_tune();
		}

		uvmexp.inactarg = (uvmexp.active + uvmexp.inactive) / 3;
		if (uvmexp.inactarg <= uvmexp.freetarg) {
			uvmexp.inactarg = uvmexp.freetarg + 1;
		}

		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d, inact/itarg=%d/%d",
		    uvmexp.free, uvmexp.freetarg, uvmexp.inactive,
		    uvmexp.inactarg);

		/*
		 * scan if needed
		 */

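		/*
		 * we kick off a scan when we are below the free or inactive
		 * targets, or (XXXUBC) when vnode-backed pages have grown
		 * past roughly 13/16 (~81%) of all managed pages, so the
		 * page cache is trimmed back even when the other targets
		 * are already met.
		 */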
		if (uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
		    uvmexp.inactive < uvmexp.inactarg ||
		    uvm_pgcnt_vnode >
		    (uvmexp.active + uvmexp.inactive + uvmexp.wired +
		     uvmexp.free) * 13 / 16) {
			uvmpd_scan();
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */

		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
		}

		/*
		 * scan done.  unlock page queues (the only lock we are holding)
		 */

		uvm_unlock_pageq();
	}
	/*NOTREACHED*/
}


/*
 * uvm_aiodone_daemon: main loop for the aiodone daemon.
 */

void
uvm_aiodone_daemon(void *arg)
{
	int s, free;
	struct buf *bp, *nbp;
	UVMHIST_FUNC("uvm_aiodoned"); UVMHIST_CALLED(pdhist);

	for (;;) {

		/*
		 * carefully attempt to go to sleep (without losing "wakeups"!).
		 * we need splbio because we want to make sure the aio_done list
		 * is totally empty before we go to sleep.
		 */

		s = splbio();
		simple_lock(&uvm.aiodoned_lock);
		if (TAILQ_FIRST(&uvm.aio_done) == NULL) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.aiodoned,
			    &uvm.aiodoned_lock, FALSE, "aiodoned", 0);
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

			/* relock aiodoned_lock, still at splbio */
			simple_lock(&uvm.aiodoned_lock);
		}

		/*
		 * check for done aio structures
		 */

		bp = TAILQ_FIRST(&uvm.aio_done);
		if (bp) {
			TAILQ_INIT(&uvm.aio_done);
		}

		simple_unlock(&uvm.aiodoned_lock);
		splx(s);

		/*
		 * process each i/o that's done.
		 */

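		/*
		 * bufs marked B_PDAEMON had their i/o started by the page
		 * daemon; uvmexp.paging was bumped when those pageouts were
		 * issued, so it is dropped back down here as they complete.
		 */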
		free = uvmexp.free;
		while (bp != NULL) {
			if (bp->b_flags & B_PDAEMON) {
				uvmexp.paging -= bp->b_bufsize >> PAGE_SHIFT;
			}
			nbp = TAILQ_NEXT(bp, b_freelist);
			(*bp->b_iodone)(bp);
			bp = nbp;
		}
		if (free <= uvmexp.reserve_kernel) {
			s = uvm_lock_fpageq();
			wakeup(&uvm.pagedaemon);
			uvm_unlock_fpageq(s);
		} else {
			simple_lock(&uvm.pagedaemon_lock);
			wakeup(&uvmexp.free);
			simple_unlock(&uvm.pagedaemon_lock);
		}
	}
}


/*
 * uvmpd_scan_inactive: scan an inactive list for pages to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 * => we return TRUE if we are exiting because we met our target
 */

static boolean_t
uvmpd_scan_inactive(pglst)
	struct pglist *pglst;
{
	boolean_t retval = FALSE;	/* assume we haven't hit target */
	int s, free, result;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp;
	int npages;
	struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT];	/* XXX: see below */
	int swnpages, swcpages;				/* XXX: see below */
	int swslot;
	struct vm_anon *anon;
	boolean_t swap_backed, vnode_only;
	vaddr_t start;
	int dirtyreacts, vpgs;
	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);

	/*
	 * note: we currently keep swap-backed pages on a separate inactive
	 * list from object-backed pages.   however, merging the two lists
	 * back together again hasn't been ruled out.   thus, we keep our
	 * swap cluster in "swpps" rather than in pps (allows us to mix
	 * clustering types in the event of a mixed inactive queue).
	 */
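
	/*
	 * note on cluster size: the arrays above hold MAXBSIZE >> PAGE_SHIFT
	 * entries, e.g. a 16-page pageout cluster on a typical configuration
	 * with a 64KB MAXBSIZE and 4KB pages.
	 */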

	/*
	 * swslot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */

	swslot = 0;
	swnpages = swcpages = 0;
	free = 0;
	dirtyreacts = 0;
	vnode_only = FALSE;

	for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {

		/*
		 * note that p can be NULL iff we have traversed the whole
		 * list and need to do one final swap-backed clustered pageout.
		 */

		uobj = NULL;
		anon = NULL;

		if (p) {

			/*
			 * update our copy of "free" and see if we've met
			 * our target
			 */

			s = uvm_lock_fpageq();
			free = uvmexp.free;
			uvm_unlock_fpageq(s);

			/* XXXUBC */
			vpgs = uvm_pgcnt_vnode -
			    (uvmexp.active + uvmexp.inactive +
			     uvmexp.wired + uvmexp.free) * 13 / 16;

			if (free + uvmexp.paging >= uvmexp.freetarg << 2 ||
			    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
				if (vpgs <= 0) {
					UVMHIST_LOG(pdhist,"  met free target:"
					    " exit loop", 0, 0, 0, 0);
					retval = TRUE;

					if (swslot == 0)
						/* exit now if no swap-i/o
						   pending */
						break;

					/* set p to null to signal final
					   swap i/o */
					p = NULL;
				} else {
					vnode_only = TRUE;
				}
			}
		}

		if (p) {	/* if (we have a new page to consider) */

			/*
			 * we are below target and have a new page to consider.
			 */

			uvmexp.pdscans++;
			nextpg = TAILQ_NEXT(p, pageq);

			/*
			 * first we attempt to lock the object that this page
			 * belongs to.  if our attempt fails we skip on to
			 * the next page (no harm done).  it is important to
			 * "try" locking the object as we are locking in the
			 * wrong order (pageq -> object) and we don't want to
			 * deadlock.
			 *
			 * the only time we expect to see an ownerless page
			 * (i.e. a page with no uobject and !PQ_ANON) is if an
			 * anon has loaned a page from a uvm_object and the
			 * uvm_object has dropped the ownership.  in that
			 * case, the anon can "take over" the loaned page
			 * and make it its own.
			 */

			/* is page part of an anon or ownerless ? */
			if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {
				if (vnode_only) {
					uvm_pageactivate(p);
					continue;
				}
				anon = p->uanon;
				KASSERT(anon != NULL);
				if (!simple_lock_try(&anon->an_lock))
					/* lock failed, skip this page */
					continue;

				/*
				 * if the page is ownerless, claim it in the
				 * name of "anon"!
				 */

				if ((p->pqflags & PQ_ANON) == 0) {
					KASSERT(p->loan_count > 0);
					p->loan_count--;
					p->pqflags |= PQ_ANON;
					/* anon now owns it */
				}
				if (p->flags & PG_BUSY) {
					simple_unlock(&anon->an_lock);
					uvmexp.pdbusy++;
					/* someone else owns page, skip it */
					continue;
				}
				uvmexp.pdanscan++;
			} else {
				uobj = p->uobject;
				KASSERT(uobj != NULL);
				if (vnode_only &&
				    uobj->pgops != &uvm_vnodeops) {
					uvm_pageactivate(p);
					continue;
				}
				if (!simple_lock_try(&uobj->vmobjlock))
					/* lock failed, skip this page */
					continue;

				if (p->flags & PG_BUSY) {
					simple_unlock(&uobj->vmobjlock);
					uvmexp.pdbusy++;
					/* someone else owns page, skip it */
					continue;
				}
				uvmexp.pdobscan++;
			}

			/*
			 * we now have the object and the page queues locked.
			 * the page is not busy.   if the page is clean we
			 * can free it now and continue.
			 */

			if (p->flags & PG_CLEAN) {
				if (p->pqflags & PQ_SWAPBACKED) {
					/* this page now lives only in swap */
					simple_lock(&uvm.swap_data_lock);
					uvmexp.swpgonly++;
					simple_unlock(&uvm.swap_data_lock);
				}

				uvm_pagefree(p);
				uvmexp.pdfreed++;

				if (anon) {

					/*
					 * an anonymous page can only be clean
					 * if it has backing store assigned.
					 */

					KASSERT(anon->an_swslot != 0);

					/* remove from object */
					anon->u.an_page = NULL;
					simple_unlock(&anon->an_lock);
				} else {
					/* pagefree has already removed the
					 * page from the object */
					simple_unlock(&uobj->vmobjlock);
				}
				continue;
			}

			/*
			 * this page is dirty, skip it if we'll have met our
			 * free target when all the current pageouts complete.
			 */

			if (free + uvmexp.paging > uvmexp.freetarg << 2) {
				if (anon) {
					simple_unlock(&anon->an_lock);
				} else {
					simple_unlock(&uobj->vmobjlock);
				}
				continue;
			}

			/*
			 * this page is dirty, but we can't page it out:
			 * swap is full and every swap slot holds a page that
			 * lives only in swap.  reactivate it so that we
			 * eventually cycle all pages thru the inactive queue.
			 */

			KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
			if ((p->pqflags & PQ_SWAPBACKED) &&
			    uvmexp.swpgonly == uvmexp.swpages) {
				dirtyreacts++;
				uvm_pageactivate(p);
				if (anon) {
					simple_unlock(&anon->an_lock);
				} else {
					simple_unlock(&uobj->vmobjlock);
				}
				continue;
			}

			/*
			 * if the page is swap-backed and dirty and swap space
			 * is full, free any swap allocated to the page
			 * so that other pages can be paged out.
			 */

			KASSERT(uvmexp.swpginuse <= uvmexp.swpages);
			if ((p->pqflags & PQ_SWAPBACKED) &&
			    uvmexp.swpginuse == uvmexp.swpages) {

				if ((p->pqflags & PQ_ANON) &&
				    p->uanon->an_swslot) {
					uvm_swap_free(p->uanon->an_swslot, 1);
					p->uanon->an_swslot = 0;
				}
				if (p->pqflags & PQ_AOBJ) {
					uao_dropswap(p->uobject,
					    p->offset >> PAGE_SHIFT);
				}
			}

			/*
			 * the page we are looking at is dirty.   we must
			 * clean it before it can be freed.  to do this we
			 * first mark the page busy so that no one else will
			 * touch the page.
			 */

			swap_backed = ((p->pqflags & PQ_SWAPBACKED) != 0);
			p->flags |= PG_BUSY;		/* now we own it */
			UVM_PAGE_OWN(p, "scan_inactive");
			uvmexp.pgswapout++;

			/*
			 * for swap-backed pages we need to (re)allocate
			 * swap space.
			 */

			if (swap_backed) {

				/*
				 * free old swap slot (if any)
				 */

				if (anon) {
					if (anon->an_swslot) {
						uvm_swap_free(anon->an_swslot,
						    1);
						anon->an_swslot = 0;
					}
				} else {
					uao_dropswap(uobj,
					    p->offset >> PAGE_SHIFT);
				}

				/*
				 * start new cluster (if necessary)
				 */

				if (swslot == 0) {
					swnpages = MAXBSIZE >> PAGE_SHIFT;
					swslot = uvm_swap_alloc(&swnpages,
					    TRUE);
					if (swslot == 0) {
						/* no swap?  give up! */
						p->flags &= ~PG_BUSY;
						UVM_PAGE_OWN(p, NULL);
						if (anon)
							simple_unlock(
							    &anon->an_lock);
						else
							simple_unlock(
							    &uobj->vmobjlock);
						continue;
					}
					swcpages = 0;	/* cluster is empty */
				}

				/*
				 * add block to cluster
				 */

				swpps[swcpages] = p;
				if (anon)
					anon->an_swslot = swslot + swcpages;
				else
					uao_set_swslot(uobj,
					    p->offset >> PAGE_SHIFT,
					    swslot + swcpages);
				swcpages++;
			}
		} else {

			/* if p == NULL we must be doing a last swap i/o */
			swap_backed = TRUE;
		}

		/*
		 * now consider doing the pageout.
		 *
		 * for swap-backed pages, we do the pageout if we have either
		 * filled the cluster (in which case swnpages == swcpages) or
		 * run out of pages (p == NULL).
		 *
		 * for object pages, we always do the pageout.
		 */

		if (swap_backed) {
			if (p) {	/* if we just added a page to cluster */
				if (anon)
					simple_unlock(&anon->an_lock);
				else
					simple_unlock(&uobj->vmobjlock);

				/* cluster not full yet? */
				if (swcpages < swnpages)
					continue;
			}

			/* starting I/O now... set up for it */
			npages = swcpages;
			ppsp = swpps;
			/* for swap-backed pages only */
			start = (vaddr_t) swslot;

			/* if this is final pageout we could have a few
			 * extra swap blocks */
			if (swcpages < swnpages) {
				uvm_swap_free(swslot + swcpages,
				    (swnpages - swcpages));
			}
		} else {
			/* normal object pageout */
			ppsp = pps;
			npages = sizeof(pps) / sizeof(struct vm_page *);
			/* not looked at because PGO_ALLPAGES is set */
			start = 0;
		}

		/*
		 * now do the pageout.
		 *
		 * for swap_backed pages we have already built the cluster.
		 * for !swap_backed pages, uvm_pager_put will call the object's
		 * "make put cluster" function to build a cluster on our behalf.
		 *
		 * we pass the PGO_PDFREECLUST flag to uvm_pager_put to instruct
		 * it to free the cluster pages for us on a successful I/O (it
		 * always does this for un-successful I/O requests).  this
		 * allows us to do clustered pageout without having to deal
		 * with cluster pages at this level.
		 *
		 * note locking semantics of uvm_pager_put with PGO_PDFREECLUST:
		 * IN: locked: uobj (if !swap_backed), page queues
		 * OUT: locked: uobj (if !swap_backed && result != VM_PAGER_PEND)
		 *     !locked: pageqs, uobj (if swap_backed || VM_PAGER_PEND)
		 *
		 * [the bit about VM_PAGER_PEND saves us one lock-unlock pair]
		 */

		/* locked: uobj (if !swap_backed), page queues */
		uvmexp.pdpageouts++;
		result = uvm_pager_put(swap_backed ? NULL : uobj, p,
		    &ppsp, &npages, PGO_ALLPAGES|PGO_PDFREECLUST, start, 0);
		/* locked: uobj (if !swap_backed && result != PEND) */
		/* unlocked: pageqs, object (if swap_backed || result == PEND) */

		/*
		 * if we did i/o to swap, zero swslot to indicate that we are
		 * no longer building a swap-backed cluster.
		 */

		if (swap_backed)
			swslot = 0;		/* done with this cluster */

		/*
		 * first, we check for VM_PAGER_PEND which means that the
		 * async I/O is in progress and the async I/O done routine
		 * will clean up after us.   in this case we move on to the
		 * next page.
		 *
		 * there is a very remote chance that the pending async i/o can
		 * finish _before_ we get here.   if that happens, our page "p"
		 * may no longer be on the inactive queue.   so we verify this
		 * when determining the next page (starting over at the head if
		 * we've lost our inactive page).
		 */

		if (result == VM_PAGER_PEND) {
			uvmexp.paging += npages;
			uvm_lock_pageq();
			uvmexp.pdpending++;
			if (p) {
				if (p->pqflags & PQ_INACTIVE)
					nextpg = TAILQ_NEXT(p, pageq);
				else
					nextpg = TAILQ_FIRST(pglst);
			} else {
				nextpg = NULL;
			}
			continue;
		}

		if (result == VM_PAGER_ERROR &&
		    curproc == uvm.pagedaemon_proc) {
			uvm_lock_pageq();
			nextpg = TAILQ_NEXT(p, pageq);
			uvm_pageactivate(p);
			continue;
		}

		/*
		 * clean up "p" if we have one
		 */

		if (p) {
			/*
			 * the I/O request to "p" is done and uvm_pager_put
			 * has freed any cluster pages it may have allocated
			 * during I/O.  all that is left for us to do is
			 * clean up page "p" (which is still PG_BUSY).
			 *
			 * our result could be one of the following:
			 *   VM_PAGER_OK: successful pageout
			 *
			 *   VM_PAGER_AGAIN: tmp resource shortage, we skip
			 *     to next page
			 *   VM_PAGER_{FAIL,ERROR,BAD}: an error.   we
			 *     "reactivate" page to get it out of the way (it
			 *     will eventually drift back into the inactive
			 *     queue for a retry).
			 *   VM_PAGER_UNLOCK: should never see this as it is
			 *     only valid for "get" operations
			 */

			/* relock p's object: page queues not locked yet, so
			 * no need for "try" */

			/* !swap_backed case: already locked... */
			if (swap_backed) {
				if (anon)
					simple_lock(&anon->an_lock);
				else
					simple_lock(&uobj->vmobjlock);
			}

			/* handle PG_WANTED now */
			if (p->flags & PG_WANTED)
				/* still holding object lock */
				wakeup(p);

			p->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(p, NULL);

			/* released during I/O? */
			if (p->flags & PG_RELEASED) {
				if (anon) {
					/* remove page so we can get nextpg */
					anon->u.an_page = NULL;

					simple_unlock(&anon->an_lock);
					uvm_anfree(anon);	/* kills anon */
					pmap_page_protect(p, VM_PROT_NONE);
					anon = NULL;
					uvm_lock_pageq();
					nextpg = TAILQ_NEXT(p, pageq);
					/* free released page */
					uvm_pagefree(p);

				} else {

					/*
					 * pgo_releasepg nukes the page and
					 * gets "nextpg" for us.  it returns
					 * with the page queues locked (when
					 * given nextpg ptr).
					 */

					if (!uobj->pgops->pgo_releasepg(p,
					    &nextpg))
						/* uobj died after release */
						uobj = NULL;

					/*
					 * lock page queues here so that they're
					 * always locked at the end of the loop.
					 */

					uvm_lock_pageq();
				}
			} else {	/* page was not released during I/O */
				uvm_lock_pageq();
				nextpg = TAILQ_NEXT(p, pageq);
				if (result != VM_PAGER_OK) {
					/* pageout was a failure... */
					if (result != VM_PAGER_AGAIN)
						uvm_pageactivate(p);
					pmap_clear_reference(p);
					/* XXXCDC: if (swap_backed) FREE p's
					 * swap block? */
				} else {
					/* pageout was a success... */
					pmap_clear_reference(p);
					pmap_clear_modify(p);
					p->flags |= PG_CLEAN;
				}
			}

			/*
			 * drop object lock (if there is an object left).   do
			 * a safety check of nextpg to make sure it is on the
			 * inactive queue (it should be since PG_BUSY pages on
			 * the inactive queue can't be re-queued [note: not
			 * true for active queue]).
			 */

			if (anon)
				simple_unlock(&anon->an_lock);
			else if (uobj)
				simple_unlock(&uobj->vmobjlock);

		} else {

			/*
			 * if p is null in this loop, make sure it stays null
			 * in the next loop.
			 */

			nextpg = NULL;

			/*
			 * lock page queues here just so they're always locked
			 * at the end of the loop.
			 */

			uvm_lock_pageq();
		}

		if (nextpg && (nextpg->pqflags & PQ_INACTIVE) == 0) {
			nextpg = TAILQ_FIRST(pglst);	/* reload! */
		}
	}
	return (retval);
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

void
uvmpd_scan()
{
	int s, free, inactive_shortage, swap_shortage, pages_freed;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	boolean_t got_it;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;		/* counter */
	uobj = NULL;

	/*
	 * get current "free" page count
	 */
	s = uvm_lock_fpageq();
	free = uvmexp.free;
	uvm_unlock_fpageq(s);

#ifndef __SWAP_BROKEN
	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */
	if (free < uvmexp.freetarg) {
		uvmexp.pdswout++;
		UVMHIST_LOG(pdhist,"  free %d < target %d: swapout", free,
		    uvmexp.freetarg, 0, 0);
		uvm_unlock_pageq();
		uvm_swapout_threads();
		uvm_lock_pageq();

	}
#endif

	/*
	 * now we want to work on meeting our targets.   first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	/*
	 * alternate starting queue between swap and object based on the
	 * low bit of uvmexp.pdrevs (which we bump by one each call).
	 */

	got_it = FALSE;
	pages_freed = uvmexp.pdfreed;
	if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
		got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
	if (!got_it)
		got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
	if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
		(void) uvmpd_scan_inactive(&uvm.page_inactive_swp);
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * we have done the scan to get free pages.   now we work on meeting
	 * our inactive target.
	 */

	inactive_shortage = uvmexp.inactarg - uvmexp.inactive;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */

	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse == uvmexp.swpages &&
	    uvmexp.swpgonly < uvmexp.swpages &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}

	UVMHIST_LOG(pdhist, "  loop 2: inactive_shortage=%d swap_shortage=%d",
	    inactive_shortage, swap_shortage,0,0);
	for (p = TAILQ_FIRST(&uvm.page_active);
	     p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq);
		if (p->flags & PG_BUSY)
			continue;	/* quick check before trying to lock */

		/*
		 * lock the page's owner.
		 */
		/* is page anon owned or ownerless? */
		if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {
			KASSERT(p->uanon != NULL);
			if (!simple_lock_try(&p->uanon->an_lock))
				continue;

			/* take over the page? */
			if ((p->pqflags & PQ_ANON) == 0) {
				KASSERT(p->loan_count > 0);
				p->loan_count--;
				p->pqflags |= PQ_ANON;
			}
		} else {
			if (!simple_lock_try(&p->uobject->vmobjlock))
				continue;
		}

		/*
		 * skip this page if it's busy.
		 */

		if ((p->flags & PG_BUSY) != 0) {
			if (p->pqflags & PQ_ANON)
				simple_unlock(&p->uanon->an_lock);
			else
				simple_unlock(&p->uobject->vmobjlock);
			continue;
		}

		/*
		 * if there's a shortage of swap, free any swap allocated
		 * to this page so that other pages can be paged out.
		 */

		if (swap_shortage > 0) {
			if ((p->pqflags & PQ_ANON) && p->uanon->an_swslot) {
				uvm_swap_free(p->uanon->an_swslot, 1);
				p->uanon->an_swslot = 0;
				p->flags &= ~PG_CLEAN;
				swap_shortage--;
			}
			if (p->pqflags & PQ_AOBJ) {
				int slot = uao_set_swslot(p->uobject,
				    p->offset >> PAGE_SHIFT, 0);
				if (slot) {
					uvm_swap_free(slot, 1);
					p->flags &= ~PG_CLEAN;
					swap_shortage--;
				}
			}
		}

		/*
		 * deactivate this page if there's a shortage of
		 * inactive pages.
		 */

		if (inactive_shortage > 0) {
			pmap_page_protect(p, VM_PROT_NONE);
			/* no need to check wire_count as pg is "active" */
			uvm_pagedeactivate(p);
			uvmexp.pddeact++;
			inactive_shortage--;
		}
		if (p->pqflags & PQ_ANON)
			simple_unlock(&p->uanon->an_lock);
		else
			simple_unlock(&p->uobject->vmobjlock);
	}
}