/*	$NetBSD: uvm_pdaemon.c,v 1.73 2006/02/12 09:19:59 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c	8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.73 2006/02/12 09:19:59 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define	UVMPD_NUMDIRTYREACTS	16


/*
 * local prototypes
 */

static void	uvmpd_scan(void);
static void	uvmpd_scan_inactive(struct pglist *);
static void	uvmpd_tune(void);

/*
 * XXX hack to avoid hangs when large processes fork.
 */
int uvm_extrapages;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	int timo = 0;
	int s = splbio();

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curproc == uvm.pagedaemon_proc && uvmexp.paging == 0) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.  but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *	[1] panic now
		 *	[2] put a timeout on the sleep, thus causing the
		 *	    pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
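		/* hz is ticks per second, so hz >> 3 is roughly 1/8 second */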
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	simple_lock(&uvm.pagedaemon_lock);
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
	    timo);

	splx(s);
}


/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune(void)
{
	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	uvmexp.freemin = uvmexp.npages / 20;

	/* between 16k and 256k */
	/* XXX:  what are these values good for? */
	uvmexp.freemin = MAX(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
	uvmexp.freemin = MIN(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);
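
	/*
	 * worked example (assuming 4KB pages, i.e. PAGE_SHIFT == 12):
	 * the clamps above work out to MAX(freemin, 4) and then
	 * MIN(freemin, 64) pages, i.e. freemin ends up between 16KB
	 * and 256KB worth of pages.
	 */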

	/* Make sure there's always a user page free. */
	if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
		uvmexp.freemin = uvmexp.reserve_kernel + 1;

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;

	uvmexp.freetarg += uvm_extrapages;
	uvm_extrapages = 0;

	/* uvmexp.inactarg: computed in main daemon loop */

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	    uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	int bufcnt, npages = 0;
	int extrapages = 0;
	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_proc = curproc;
	uvm_lock_pageq();
	npages = uvmexp.npages;
	uvmpd_tune();
	uvm_unlock_pageq();

	/*
	 * main loop
	 */

	for (;;) {
		simple_lock(&uvm.pagedaemon_lock);

		UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
		    &uvm.pagedaemon_lock, FALSE, "pgdaemon", 0);
		uvmexp.pdwoke++;
		UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

		/*
		 * now lock page queues and recompute inactive count
		 */

		uvm_lock_pageq();
		if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
			npages = uvmexp.npages;
			extrapages = uvm_extrapages;
			uvmpd_tune();
		}
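
		/*
		 * the inactive target is a fraction (inactivepct) of the
		 * total number of pageable pages.  e.g. if the percentage
		 * works out to 33 and there are 10000 pageable pages,
		 * inactarg would be about 3300.  (illustrative numbers
		 * only; the actual value comes from uvmexp.inactivepct.)
		 */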
		uvmexp.inactarg = UVM_PCTPARAM_APPLY(&uvmexp.inactivepct,
		    uvmexp.active + uvmexp.inactive);
		if (uvmexp.inactarg <= uvmexp.freetarg) {
			uvmexp.inactarg = uvmexp.freetarg + 1;
		}

		/*
		 * Estimate a hint.  Note that bufmem is returned to
		 * the system only when an entire pool page is empty.
		 */
		bufcnt = uvmexp.freetarg - uvmexp.free;
		if (bufcnt < 0)
			bufcnt = 0;

		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d, inact/itarg=%d/%d",
		    uvmexp.free, uvmexp.freetarg, uvmexp.inactive,
		    uvmexp.inactarg);

		/*
		 * scan if needed
		 */

		if (uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
		    uvmexp.inactive < uvmexp.inactarg) {
			uvmpd_scan();
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */

		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
		}

		/*
		 * scan done.  unlock page queues (the only lock we are holding)
		 */

		uvm_unlock_pageq();

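		/* bufcnt is a page count; buf_drain() takes bytes */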
		buf_drain(bufcnt << PAGE_SHIFT);

		/*
		 * drain pool resources now that we're not holding any locks
		 */

		pool_drain(0);

		/*
		 * free any cached u-areas we don't need
		 */
		uvm_uarea_drain(TRUE);

	}
	/*NOTREACHED*/
}


/*
 * uvm_aiodone_daemon:  main loop for the aiodone daemon.
 */

void
uvm_aiodone_daemon(void *arg)
{
	int s, free;
	struct buf *bp, *nbp;
	UVMHIST_FUNC("uvm_aiodoned"); UVMHIST_CALLED(pdhist);

	for (;;) {

		/*
		 * carefully attempt to go to sleep (without losing "wakeups"!).
		 * we need splbio because we want to make sure the aio_done list
		 * is totally empty before we go to sleep.
		 */

		s = splbio();
		simple_lock(&uvm.aiodoned_lock);
		if (TAILQ_FIRST(&uvm.aio_done) == NULL) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.aiodoned,
			    &uvm.aiodoned_lock, FALSE, "aiodoned", 0);
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

			/* relock aiodoned_lock, still at splbio */
			simple_lock(&uvm.aiodoned_lock);
		}

		/*
		 * check for done aio structures
		 */

		bp = TAILQ_FIRST(&uvm.aio_done);
		if (bp) {
			TAILQ_INIT(&uvm.aio_done);
		}

		simple_unlock(&uvm.aiodoned_lock);
		splx(s);

		/*
		 * process each i/o that's done.
		 */

		free = uvmexp.free;
		while (bp != NULL) {
			nbp = TAILQ_NEXT(bp, b_freelist);
			(*bp->b_iodone)(bp);
			bp = nbp;
		}
		if (free <= uvmexp.reserve_kernel) {
			s = uvm_lock_fpageq();
			wakeup(&uvm.pagedaemon);
			uvm_unlock_fpageq(s);
		} else {
			simple_lock(&uvm.pagedaemon_lock);
			wakeup(&uvmexp.free);
			simple_unlock(&uvm.pagedaemon_lock);
		}
	}
}

#if defined(VMSWAP)
struct swapcluster {
	int swc_slot;
	int swc_nallocated;
	int swc_nused;
	struct vm_page *swc_pages[round_page(MAXPHYS) >> PAGE_SHIFT];
};
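
/*
 * a swapcluster batches swap-backed pageouts into a single large i/o.
 * sketch of the intended lifecycle, as used by uvmpd_scan_inactive()
 * below:
 *
 *	swapcluster_init(&swc);
 *	for each dirty swap-backed page:
 *		swapcluster_allocslots(&swc);	(reserve swap slots)
 *		swapcluster_add(&swc, pg);	(attach a busy page)
 *		swapcluster_flush(&swc, FALSE);	(writes only when full)
 *	swapcluster_flush(&swc, TRUE);		(final, possibly partial)
 */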

static void
swapcluster_init(struct swapcluster *swc)
{

	swc->swc_slot = 0;
}

static int
swapcluster_allocslots(struct swapcluster *swc)
{
	int slot;
	int npages;

	if (swc->swc_slot != 0) {
		return 0;
	}

	/*
	 * even with an odd MAXPHYS, the shift implicitly rounds
	 * the cluster size down to a whole number of pages.
	 */
	npages = MAXPHYS >> PAGE_SHIFT;
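	/*
	 * e.g. with a MAXPHYS of 64KB and 4KB pages (illustrative
	 * values only), this asks for a 16-page cluster.
	 */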
	slot = uvm_swap_alloc(&npages, TRUE);
	if (slot == 0) {
		return ENOMEM;
	}
	swc->swc_slot = slot;
	swc->swc_nallocated = npages;
	swc->swc_nused = 0;

	return 0;
}

static int
swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
{
	int slot;
	struct uvm_object *uobj;

	KASSERT(swc->swc_slot != 0);
	KASSERT(swc->swc_nused < swc->swc_nallocated);
	KASSERT((pg->pqflags & PQ_SWAPBACKED) != 0);

	slot = swc->swc_slot + swc->swc_nused;
	uobj = pg->uobject;
	if (uobj == NULL) {
		LOCK_ASSERT(simple_lock_held(&pg->uanon->an_lock));
		pg->uanon->an_swslot = slot;
	} else {
		int result;

		LOCK_ASSERT(simple_lock_held(&uobj->vmobjlock));
		result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
		if (result == -1) {
			return ENOMEM;
		}
	}
	swc->swc_pages[swc->swc_nused] = pg;
	swc->swc_nused++;

	return 0;
}

static void
swapcluster_flush(struct swapcluster *swc, boolean_t now)
{
	int slot;
	int nused;
	int nallocated;
	int error;

	if (swc->swc_slot == 0) {
		return;
	}
	KASSERT(swc->swc_nused <= swc->swc_nallocated);

	slot = swc->swc_slot;
	nused = swc->swc_nused;
	nallocated = swc->swc_nallocated;

	/*
	 * if this is the final pageout we could have a few
	 * unused swap blocks.  if so, free them now.
	 */

	if (nused < nallocated) {
		if (!now) {
			return;
		}
		uvm_swap_free(slot + nused, nallocated - nused);
	}

	/*
	 * now start the pageout.
	 */

	uvmexp.pdpageouts++;
	error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
	KASSERT(error == 0);

	/*
	 * zero swslot to indicate that we are
	 * no longer building a swap-backed cluster.
	 */

	swc->swc_slot = 0;
}
#endif /* defined(VMSWAP) */

/*
 * uvmpd_scan_inactive: scan an inactive list for pages to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 * => we exit the scan loop early once we meet our free target
 */

static void
uvmpd_scan_inactive(struct pglist *pglst)
{
	struct vm_page *p, *nextpg = NULL; /* Quell compiler warning */
	struct uvm_object *uobj;
	struct vm_anon *anon;
#if defined(VMSWAP)
	struct swapcluster swc;
#endif /* defined(VMSWAP) */
	struct simplelock *slock;
	int dirtyreacts, t;
	boolean_t anonunder, fileunder, execunder;
	boolean_t anonover, fileover, execover;
	boolean_t anonreact, filereact, execreact;
	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);

	/*
	 * swc_slot is non-zero while we are building a swap cluster.  we
	 * want to stay in the loop while we have a page to scan or we
	 * have a swap cluster to flush.
	 */

#if defined(VMSWAP)
	swapcluster_init(&swc);
#endif /* defined(VMSWAP) */
	dirtyreacts = 0;

	/*
	 * decide which types of pages we want to reactivate instead of freeing
	 * to keep usage within the minimum and maximum usage limits.
	 */

	t = uvmexp.active + uvmexp.inactive + uvmexp.free;
	anonunder = (uvmexp.anonpages <= (t * uvmexp.anonmin) >> 8);
	fileunder = (uvmexp.filepages <= (t * uvmexp.filemin) >> 8);
	execunder = (uvmexp.execpages <= (t * uvmexp.execmin) >> 8);
	anonover = uvmexp.anonpages > ((t * uvmexp.anonmax) >> 8);
	fileover = uvmexp.filepages > ((t * uvmexp.filemax) >> 8);
	execover = uvmexp.execpages > ((t * uvmexp.execmax) >> 8);
	anonreact = anonunder || (!anonover && (fileover || execover));
	filereact = fileunder || (!fileover && (anonover || execover));
	execreact = execunder || (!execover && (anonover || fileover));
	if (filereact && execreact && (anonreact || uvm_swapisfull())) {
		anonreact = filereact = execreact = FALSE;
	}
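
	/*
	 * note: anonmin/filemin/execmin (and the *max values) above are
	 * understood to be fixed-point fractions of pageable memory with
	 * a denominator of 256, hence the ">> 8"; e.g. a value of 26
	 * corresponds to roughly 10% (26/256).
	 */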
#if !defined(VMSWAP)
	/*
	 * XXX no point in putting swap-backed pages on the page queue.
	 */

	anonreact = TRUE;
#endif /* !defined(VMSWAP) */
	for (p = TAILQ_FIRST(pglst); p != NULL; p = nextpg) {
		uobj = NULL;
		anon = NULL;

		/*
		 * see if we've met the free target.
		 */
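		/*
		 * "freetarg << 2" aims at four times the free target, and
		 * uvmexp.paging counts pageouts already in flight, so we
		 * stop scanning once enough pages are free or on their
		 * way to being freed.
		 */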

		if (uvmexp.free + uvmexp.paging >= uvmexp.freetarg << 2 ||
		    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
			UVMHIST_LOG(pdhist,"  met free target: "
			    "exit loop", 0, 0, 0, 0);
			break;
		}

		/*
		 * we are below target and have a new page to consider.
		 */

		uvmexp.pdscans++;
		nextpg = TAILQ_NEXT(p, pageq);

		/*
		 * move referenced pages back to active queue and
		 * skip to next page.
		 */

		if (pmap_is_referenced(p)) {
			uvm_pageactivate(p);
			uvmexp.pdreact++;
			continue;
		}
		anon = p->uanon;
		uobj = p->uobject;

		/*
		 * enforce the minimum thresholds on different
		 * types of memory usage.  if reusing the current
		 * page would reduce that type of usage below its
		 * minimum, reactivate the page instead and move
		 * on to the next page.
		 */

		if (uobj && UVM_OBJ_IS_VTEXT(uobj) && execreact) {
			uvm_pageactivate(p);
			uvmexp.pdreexec++;
			continue;
		}
		if (uobj && UVM_OBJ_IS_VNODE(uobj) &&
		    !UVM_OBJ_IS_VTEXT(uobj) && filereact) {
			uvm_pageactivate(p);
			uvmexp.pdrefile++;
			continue;
		}
		if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && anonreact) {
			uvm_pageactivate(p);
			uvmexp.pdreanon++;
			continue;
		}

		/*
		 * first we attempt to lock the object that this page
		 * belongs to.  if our attempt fails we skip on to
		 * the next page (no harm done).  it is important to
		 * "try" locking the object as we are locking in the
		 * wrong order (pageq -> object) and we don't want to
		 * deadlock.
		 *
		 * the only time we expect to see an ownerless page
		 * (i.e. a page with no uobject and !PQ_ANON) is if an
		 * anon has loaned a page from a uvm_object and the
		 * uvm_object has dropped the ownership.  in that
		 * case, the anon can "take over" the loaned page
		 * and make it its own.
		 */

		/* does the page belong to an object? */
		if (uobj != NULL) {
			slock = &uobj->vmobjlock;
			if (!simple_lock_try(slock)) {
				continue;
			}
			if (p->flags & PG_BUSY) {
				simple_unlock(slock);
				uvmexp.pdbusy++;
				continue;
			}
			uvmexp.pdobscan++;
		} else {
#if defined(VMSWAP)
			KASSERT(anon != NULL);
			slock = &anon->an_lock;
			if (!simple_lock_try(slock)) {
				continue;
			}

			/*
			 * set PQ_ANON if it isn't set already.
			 */

			if ((p->pqflags & PQ_ANON) == 0) {
				KASSERT(p->loan_count > 0);
				p->loan_count--;
				p->pqflags |= PQ_ANON;
				/* anon now owns it */
			}
			if (p->flags & PG_BUSY) {
				simple_unlock(slock);
				uvmexp.pdbusy++;
				continue;
			}
			uvmexp.pdanscan++;
#else /* defined(VMSWAP) */
			panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
		}


		/*
		 * we now have the object and the page queues locked.
		 * if the page is not swap-backed, call the object's
		 * pager to flush and free the page.
		 */

#if defined(READAHEAD_STATS)
		if ((p->flags & PG_SPECULATIVE) != 0) {
			p->flags &= ~PG_SPECULATIVE;
			uvm_ra_miss.ev_count++;
		}
#endif /* defined(READAHEAD_STATS) */

		if ((p->pqflags & PQ_SWAPBACKED) == 0) {
			uvm_unlock_pageq();
			(void) (uobj->pgops->pgo_put)(uobj, p->offset,
			    p->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
			uvm_lock_pageq();
			if (nextpg &&
			    (nextpg->pqflags & PQ_INACTIVE) == 0) {
				nextpg = TAILQ_FIRST(pglst);
			}
			continue;
		}

#if defined(VMSWAP)
		/*
		 * the page is swap-backed.  remove all the permissions
		 * from the page so we can sync the modified info
		 * without any race conditions.  if the page is clean
		 * we can free it now and continue.
		 */

		pmap_page_protect(p, VM_PROT_NONE);
		if ((p->flags & PG_CLEAN) && pmap_clear_modify(p)) {
			p->flags &= ~(PG_CLEAN);
		}
		if (p->flags & PG_CLEAN) {
			int slot;
			int pageidx;

			pageidx = p->offset >> PAGE_SHIFT;
			uvm_pagefree(p);
			uvmexp.pdfreed++;

			/*
			 * for anons, we need to remove the page
			 * from the anon ourselves.  for aobjs,
			 * pagefree did that for us.
			 */

			if (anon) {
				KASSERT(anon->an_swslot != 0);
				anon->an_page = NULL;
				slot = anon->an_swslot;
			} else {
				slot = uao_find_swslot(uobj, pageidx);
			}
			simple_unlock(slock);

			if (slot > 0) {
				/* this page is now only in swap. */
				simple_lock(&uvm.swap_data_lock);
				KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
				uvmexp.swpgonly++;
				simple_unlock(&uvm.swap_data_lock);
			}
			continue;
		}

		/*
		 * this page is dirty, skip it if we'll have met our
		 * free target when all the current pageouts complete.
		 */

		if (uvmexp.free + uvmexp.paging > uvmexp.freetarg << 2) {
			simple_unlock(slock);
			continue;
		}

		/*
		 * free any swap space allocated to the page since
		 * we'll have to write it again with its new data.
		 */

		if ((p->pqflags & PQ_ANON) && anon->an_swslot) {
			uvm_swap_free(anon->an_swslot, 1);
			anon->an_swslot = 0;
		} else if (p->pqflags & PQ_AOBJ) {
			uao_dropswap(uobj, p->offset >> PAGE_SHIFT);
		}

		/*
		 * if all pages in swap are only in swap,
		 * the swap space is full and we can't page out
		 * any more swap-backed pages.  reactivate this page
		 * so that we eventually cycle all pages through
		 * the inactive queue.
		 */

		if (uvm_swapisfull()) {
			dirtyreacts++;
			uvm_pageactivate(p);
			simple_unlock(slock);
			continue;
		}

		/*
		 * start new swap pageout cluster (if necessary).
		 */

		if (swapcluster_allocslots(&swc)) {
			simple_unlock(slock);
			continue;
		}

		/*
		 * at this point, we're definitely going to reuse this
		 * page.  mark the page busy and delayed-free.
		 * we should remove the page from the page queues
		 * so we don't ever look at it again.
		 * adjust counters and such.
		 */

		p->flags |= PG_BUSY;
		UVM_PAGE_OWN(p, "scan_inactive");

		p->flags |= PG_PAGEOUT;
		uvmexp.paging++;
		uvm_pagedequeue(p);

		uvmexp.pgswapout++;
		uvm_unlock_pageq();

		/*
		 * add the new page to the cluster.
		 */

		if (swapcluster_add(&swc, p)) {
			p->flags &= ~(PG_BUSY|PG_PAGEOUT);
			UVM_PAGE_OWN(p, NULL);
			uvm_lock_pageq();
			uvmexp.paging--;
			uvm_pageactivate(p);
			simple_unlock(slock);
			continue;
		}
		simple_unlock(slock);

		swapcluster_flush(&swc, FALSE);
		uvm_lock_pageq();

#else /* defined(VMSWAP) */
		panic("%s: swap-backed", __func__);
#endif /* defined(VMSWAP) */

		/*
		 * the pageout is in progress.  bump counters and set up
		 * for the next loop.
		 */

		uvmexp.pdpending++;
		if (nextpg && (nextpg->pqflags & PQ_INACTIVE) == 0) {
			nextpg = TAILQ_FIRST(pglst);
		}
	}

#if defined(VMSWAP)
	uvm_unlock_pageq();
	swapcluster_flush(&swc, TRUE);
	uvm_lock_pageq();
#endif /* defined(VMSWAP) */
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

static void
uvmpd_scan(void)
{
	int inactive_shortage, swap_shortage, pages_freed;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	struct vm_anon *anon;
	struct simplelock *slock;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;
	uobj = NULL;
	anon = NULL;

#ifndef __SWAP_BROKEN

	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */

	if (uvmexp.free < uvmexp.freetarg && uvmexp.nswapdev != 0) {
		uvmexp.pdswout++;
		UVMHIST_LOG(pdhist,"  free %d < target %d: swapout",
		    uvmexp.free, uvmexp.freetarg, 0, 0);
		uvm_unlock_pageq();
		uvm_swapout_threads();
		uvm_lock_pageq();

	}
#endif

	/*
	 * now we want to work on meeting our targets.  first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	pages_freed = uvmexp.pdfreed;
	uvmpd_scan_inactive(&uvm.page_inactive);
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * we have done the scan to get free pages.  now we work on meeting
	 * our inactive target.
	 */

	inactive_shortage = uvmexp.inactarg - uvmexp.inactive;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */

	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse >= uvmexp.swpgavail &&
	    !uvm_swapisfull() &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}
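
	/*
	 * swap_shortage is a page count: each swap slot freed from an
	 * active page in the loop below counts against it.
	 */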
	UVMHIST_LOG(pdhist, "  loop 2: inactive_shortage=%d swap_shortage=%d",
	    inactive_shortage, swap_shortage,0,0);
	for (p = TAILQ_FIRST(&uvm.page_active);
	    p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	    p = nextpg) {
		nextpg = TAILQ_NEXT(p, pageq);
		if (p->flags & PG_BUSY) {
			continue;
		}

		/*
		 * lock the page's owner.
		 */

		if (p->uobject != NULL) {
			uobj = p->uobject;
			slock = &uobj->vmobjlock;
			if (!simple_lock_try(slock)) {
				continue;
			}
		} else {
			anon = p->uanon;
			KASSERT(anon != NULL);
			slock = &anon->an_lock;
			if (!simple_lock_try(slock)) {
				continue;
			}

			/* take over the page? */
			if ((p->pqflags & PQ_ANON) == 0) {
				KASSERT(p->loan_count > 0);
				p->loan_count--;
				p->pqflags |= PQ_ANON;
			}
		}

		/*
		 * skip this page if it's busy.
		 */

		if ((p->flags & PG_BUSY) != 0) {
			simple_unlock(slock);
			continue;
		}

#if defined(VMSWAP)
		/*
		 * if there's a shortage of swap, free any swap allocated
		 * to this page so that other pages can be paged out.
		 */

		if (swap_shortage > 0) {
			if ((p->pqflags & PQ_ANON) && anon->an_swslot) {
				uvm_swap_free(anon->an_swslot, 1);
				anon->an_swslot = 0;
				p->flags &= ~PG_CLEAN;
				swap_shortage--;
			} else if (p->pqflags & PQ_AOBJ) {
				int slot = uao_set_swslot(uobj,
				    p->offset >> PAGE_SHIFT, 0);
				if (slot) {
					uvm_swap_free(slot, 1);
					p->flags &= ~PG_CLEAN;
					swap_shortage--;
				}
			}
		}
#endif /* defined(VMSWAP) */

		/*
		 * if there's a shortage of inactive pages, deactivate.
		 */

		if (inactive_shortage > 0) {
			/* no need to check wire_count as pg is "active" */
			pmap_clear_reference(p);
			uvm_pagedeactivate(p);
			uvmexp.pddeact++;
			inactive_shortage--;
		}

		/*
		 * we're done with this page.
		 */

		simple_unlock(slock);
	}
}

/*
 * uvm_reclaimable: decide whether to wait for pagedaemon.
 *
 * => return TRUE if it seems worth doing a uvm_wait.
 *
 * XXX should be tunable.
 * XXX should consider pools, etc?
 */

boolean_t
uvm_reclaimable(void)
{
	int filepages;

	/*
	 * if swap is not full, no problem.
	 */

	if (!uvm_swapisfull()) {
		return TRUE;
	}

	/*
	 * file-backed pages can be reclaimed even when swap is full.
	 * if we have more than 1/16 of pageable memory or 5MB of them,
	 * try to reclaim.
	 *
	 * XXX assume the worst case, ie. all wired pages are file-backed.
	 *
	 * XXX should consider other reclaimable memory,
	 * XXX ie. pools, traditional buffer cache.
	 */
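
	/*
	 * worked example (assuming 4KB pages): the 5MB bound below is
	 * 5 * 1024 * 1024 >> 12 == 1280 pages, so on systems with more
	 * than about 80MB of pageable memory it is the bound that
	 * applies.
	 */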
	filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
	if (filepages >= MIN((uvmexp.active + uvmexp.inactive) >> 4,
	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
		return TRUE;
	}

	/*
	 * kill the process, fail allocation, etc..
	 */

	return FALSE;
}