/*	$NetBSD: uvm_pdaemon.c,v 1.114 2019/12/14 15:04:47 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c	8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.114 2019/12/14 15:04:47 ad Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/module.h>
#include <sys/atomic.h>
#include <sys/kthread.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

#ifdef UVMHIST
UVMHIST_DEFINE(pdhist);
#endif

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define	UVMPD_NUMDIRTYREACTS	16

#define	UVMPD_NUMTRYLOCKOWNER	128

/*
 * local prototypes
 */

static void	uvmpd_scan(void);
static void	uvmpd_scan_queue(void);
static void	uvmpd_tune(void);
static void	uvmpd_pool_drain_thread(void *);
static void	uvmpd_pool_drain_wakeup(void);

static unsigned int uvm_pagedaemon_waiters;

/* State for the pool drainer thread */
static kmutex_t uvmpd_pool_drain_lock __cacheline_aligned;
static kcondvar_t uvmpd_pool_drain_cv;
static bool uvmpd_pool_drain_run = false;

/*
 * XXX hack to avoid hangs when large processes fork.
 */
u_int uvm_extrapages;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	int timo = 0;

	if (uvm.pagedaemon_lwp == NULL)
		panic("out of memory before the pagedaemon thread exists");

	mutex_spin_enter(&uvm_fpageqlock);

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curlwp == uvm.pagedaemon_lwp && uvmexp.paging == 0) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.  but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	uvm_pagedaemon_waiters++;
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm_fpageqlock, false, wmsg, timo);
}
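
/*
 * Example (illustrative sketch, not part of the original file): callers
 * that are allowed to wait typically retry their allocation after
 * sleeping in uvm_wait(), much as the kernel-memory allocators do.  The
 * helper and the wait-channel string "expgwait" below are hypothetical.
 */
#if 0
static struct vm_page *
example_alloc_page_waitok(void)
{
	struct vm_page *pg;

	for (;;) {
		pg = uvm_pagealloc(NULL, 0, NULL, 0);
		if (pg != NULL)
			return pg;
		/* No free pages: wake the pagedaemon and sleep. */
		uvm_wait("expgwait");
	}
}
#endif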

/*
 * uvm_kick_pdaemon: perform checks to determine if we need to
 * give the pagedaemon a nudge, and do so if necessary.
 *
 * => called with uvm_fpageqlock held.
 */

void
uvm_kick_pdaemon(void)
{

	KASSERT(mutex_owned(&uvm_fpageqlock));

	if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
	    (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
	     uvmpdpol_needsscan_p()) ||
	    uvm_km_va_starved_p()) {
		wakeup(&uvm.pagedaemon);
	}
}

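/*
 * Example (illustrative sketch, not part of the original file): page
 * allocation paths that already hold uvm_fpageqlock nudge the daemon
 * with uvm_kick_pdaemon() rather than sleeping as uvm_wait() does.
 * The helper below and its name are hypothetical.
 */
#if 0
static void
example_note_page_taken(void)
{

	mutex_spin_enter(&uvm_fpageqlock);
	/* ... a page has just been taken off the free list ... */
	uvm_kick_pdaemon();	/* wakes the daemon only if below targets */
	mutex_spin_exit(&uvm_fpageqlock);
}
#endif
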
/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 */

static void
uvmpd_tune(void)
{
	int val;

	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	/*
	 * try to keep 0.5% of available RAM free, but limit to between
	 * 128k and 1024k per-CPU.  XXX: what are these values good for?
	 */
	val = uvmexp.npages / 200;
	val = MAX(val, (128*1024) >> PAGE_SHIFT);
	val = MIN(val, (1024*1024) >> PAGE_SHIFT);
	val *= ncpu;

	/* Make sure there's always a user page free. */
	if (val < uvmexp.reserve_kernel + 1)
		val = uvmexp.reserve_kernel + 1;
	uvmexp.freemin = val;

	/* Calculate free target. */
	val = (uvmexp.freemin * 4) / 3;
	if (val <= uvmexp.freemin)
		val = uvmexp.freemin + 1;
	uvmexp.freetarg = val + atomic_swap_uint(&uvm_extrapages, 0);

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%jd, freetarg=%jd, wiredmax=%jd",
	    uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}
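
/*
 * Worked example (illustrative, not part of the original file): on a
 * hypothetical single-CPU machine with 4 KiB pages and 2 GiB of managed
 * RAM (npages = 524288), the computation above gives:
 *
 *	val      = 524288 / 200            = 2621 pages (0.5% of RAM)
 *	val      = MIN(MAX(2621, 32), 256) =  256 pages (1024k per-CPU cap)
 *	freemin  = 256 pages               =    1 MiB
 *	freetarg = (256 * 4) / 3           =  341 pages, plus uvm_extrapages
 */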

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	int npages = 0;
	int extrapages = 0;

	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	mutex_init(&uvmpd_pool_drain_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&uvmpd_pool_drain_cv, "pooldrain");

	/* Create the pool drainer kernel thread. */
	if (kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL,
	    uvmpd_pool_drain_thread, NULL, NULL, "pooldrain"))
		panic("fork pooldrain");

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_lwp = curlwp;
	npages = uvmexp.npages;
	uvmpd_tune();

	/*
	 * main loop
	 */

	for (;;) {
		bool needsscan, needsfree, kmem_va_starved;

		kmem_va_starved = uvm_km_va_starved_p();

		mutex_spin_enter(&uvm_fpageqlock);
		if ((uvm_pagedaemon_waiters == 0 || uvmexp.paging > 0) &&
		    !kmem_va_starved) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
			    &uvm_fpageqlock, false, "pgdaemon", 0);
			uvmexp.pdwoke++;
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);
		} else {
			mutex_spin_exit(&uvm_fpageqlock);
		}

		/*
		 * now recompute inactive count
		 */

		if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
			npages = uvmexp.npages;
			extrapages = uvm_extrapages;
			mutex_spin_enter(&uvm_fpageqlock);
			uvmpd_tune();
			mutex_spin_exit(&uvm_fpageqlock);
		}

		uvmpdpol_tune();

		/*
		 * Estimate a hint.  Note that buffer memory is returned to
		 * the system only when an entire pool page becomes empty.
		 */
		mutex_spin_enter(&uvm_fpageqlock);

		UVMHIST_LOG(pdhist,"  free/ftarg=%jd/%jd",
		    uvmexp.free, uvmexp.freetarg, 0,0);

		needsfree = uvmexp.free + uvmexp.paging < uvmexp.freetarg;
		needsscan = needsfree || uvmpdpol_needsscan_p();

		/*
		 * scan if needed
		 */
		if (needsscan) {
			mutex_spin_exit(&uvm_fpageqlock);
			uvmpd_scan();
			mutex_spin_enter(&uvm_fpageqlock);
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */
		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
			uvm_pagedaemon_waiters = 0;
		}
		mutex_spin_exit(&uvm_fpageqlock);

		/*
		 * scan done.  if we don't need free memory, we're done.
		 */

		if (!needsfree && !kmem_va_starved)
			continue;

		/*
		 * kick the pool drainer thread.
		 */

		uvmpd_pool_drain_wakeup();
	}
	/*NOTREACHED*/
}


/*
 * uvm_aiodone_worker: a workqueue callback for the aiodone daemon.
 */

void
uvm_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (void *)wk;

	KASSERT(&bp->b_work == wk);

	/*
	 * process an i/o that's done.
	 */

	(*bp->b_iodone)(bp);
}

void
uvm_pageout_start(int npages)
{

	atomic_inc_uint(&uvmexp.pdpending);
	atomic_add_int(&uvmexp.paging, npages);
}

void
uvm_pageout_done(int npages)
{

	KASSERT(uvmexp.paging >= npages);
	atomic_dec_uint(&uvmexp.pdpending);
	atomic_add_int(&uvmexp.paging, -npages);

	/*
	 * wake up either the pagedaemon or the LWPs waiting for it.
	 */

	mutex_spin_enter(&uvm_fpageqlock);
	if (uvmexp.free <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else {
		wakeup(&uvmexp.free);
		uvm_pagedaemon_waiters = 0;
	}
	mutex_spin_exit(&uvm_fpageqlock);
}

/*
 * uvmpd_trylockowner: trylock the page's owner.
 *
 * => called with page interlock held.
 * => resolve orphaned O->A loaned page.
 * => return the locked mutex on success.  otherwise, return NULL.
 */

kmutex_t *
uvmpd_trylockowner(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;
	struct vm_anon *anon = pg->uanon;
	int tries, count;
	bool running;
	kmutex_t *slock;

	KASSERT(mutex_owned(&pg->interlock));

	if (uobj != NULL) {
		slock = uobj->vmobjlock;
		KASSERTMSG(slock != NULL, "pg %p uobj %p, NULL lock", pg, uobj);
	} else if (anon != NULL) {
		slock = anon->an_lock;
		KASSERTMSG(slock != NULL, "pg %p anon %p, NULL lock", pg, anon);
	} else {
		/* Page may be in state of flux - ignore. */
		mutex_exit(&pg->interlock);
		return NULL;
	}

	/*
	 * Now try to lock the objects.  We'll try hard, but don't really
	 * plan on spending more than a millisecond or so here.
	 */
	tries = (curlwp == uvm.pagedaemon_lwp ? UVMPD_NUMTRYLOCKOWNER : 1);
	for (;;) {
		if (mutex_tryenter(slock)) {
			if (uobj == NULL) {
				/*
				 * set PG_ANON if it isn't set already.
				 */
				if ((pg->flags & PG_ANON) == 0) {
					KASSERT(pg->loan_count > 0);
					pg->loan_count--;
					pg->flags |= PG_ANON;
					/* anon now owns it */
				}
			}
			mutex_exit(&pg->interlock);
			return slock;
		}
		running = mutex_owner_running(slock);
		if (!running || --tries <= 0) {
			break;
		}
		count = SPINLOCK_BACKOFF_MAX;
		SPINLOCK_BACKOFF(count);
	}

	/*
	 * We didn't get the lock; chances are the very next page on the
	 * queue also has the same lock, so if the lock owner is not running
	 * take a breather and allow them to make progress.  There could be
	 * only 1 CPU in the system, or the pagedaemon could have preempted
	 * the owner in kernel, or any number of other things could be going
	 * on.
	 */
	mutex_exit(&pg->interlock);
	if (curlwp == uvm.pagedaemon_lwp) {
		if (!running) {
			(void)kpause("pdpglock", false, 1, NULL);
		}
		uvmexp.pdbusy++;
	}
	return NULL;
}
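
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller takes the page interlock, tries to lock the owner, and skips the
 * page if that fails.  uvmpd_trylockowner() drops the interlock either
 * way.  example_examine_page() is hypothetical.
 */
#if 0
static void
example_examine_page(struct vm_page *pg)
{
	kmutex_t *lock;

	mutex_enter(&pg->interlock);
	lock = uvmpd_trylockowner(pg);		/* drops pg->interlock */
	if (lock == NULL)
		return;				/* owner busy; skip the page */
	/* ... the page's owner (uobject or anon) is now locked ... */
	mutex_exit(lock);
}
#endif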

#if defined(VMSWAP)
struct swapcluster {
	int swc_slot;
	int swc_nallocated;
	int swc_nused;
	struct vm_page *swc_pages[howmany(MAXPHYS, MIN_PAGE_SIZE)];
};

static void
swapcluster_init(struct swapcluster *swc)
{

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

static int
swapcluster_allocslots(struct swapcluster *swc)
{
	int slot;
	int npages;

	if (swc->swc_slot != 0) {
		return 0;
	}

	/* Even with strange MAXPHYS, the shift
	   implicitly rounds down to a page. */
	npages = MAXPHYS >> PAGE_SHIFT;
	slot = uvm_swap_alloc(&npages, true);
	if (slot == 0) {
		return ENOMEM;
	}
	swc->swc_slot = slot;
	swc->swc_nallocated = npages;
	swc->swc_nused = 0;

	return 0;
}

static int
swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
{
	int slot;
	struct uvm_object *uobj;

	KASSERT(swc->swc_slot != 0);
	KASSERT(swc->swc_nused < swc->swc_nallocated);
	KASSERT((pg->flags & PG_SWAPBACKED) != 0);

	slot = swc->swc_slot + swc->swc_nused;
	uobj = pg->uobject;
	if (uobj == NULL) {
		KASSERT(mutex_owned(pg->uanon->an_lock));
		pg->uanon->an_swslot = slot;
	} else {
		int result;

		KASSERT(mutex_owned(uobj->vmobjlock));
		result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
		if (result == -1) {
			return ENOMEM;
		}
	}
	swc->swc_pages[swc->swc_nused] = pg;
	swc->swc_nused++;

	return 0;
}

static void
swapcluster_flush(struct swapcluster *swc, bool now)
{
	int slot;
	int nused;
	int nallocated;
	int error __diagused;

	if (swc->swc_slot == 0) {
		return;
	}
	KASSERT(swc->swc_nused <= swc->swc_nallocated);

	slot = swc->swc_slot;
	nused = swc->swc_nused;
	nallocated = swc->swc_nallocated;

	/*
	 * if this is the final pageout we could have a few
	 * unused swap blocks.  if so, free them now.
	 */

	if (nused < nallocated) {
		if (!now) {
			return;
		}
		uvm_swap_free(slot + nused, nallocated - nused);
	}

	/*
	 * now start the pageout.
	 */

	if (nused > 0) {
		uvmexp.pdpageouts++;
		uvm_pageout_start(nused);
		error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
		KASSERT(error == 0 || error == ENOMEM);
	}

	/*
	 * zero swslot to indicate that we are
	 * no longer building a swap-backed cluster.
	 */

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

static int
swapcluster_nused(struct swapcluster *swc)
{

	return swc->swc_nused;
}
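
/*
 * Example (illustrative sketch, not part of the original file): the
 * swapcluster helpers are used as a unit by uvmpd_scan_queue() below.
 * This condensed fragment shows the intended call sequence for a single
 * locked, busied, swap-backed page "pg"; it is not standalone code.
 */
#if 0
	struct swapcluster swc;

	swapcluster_init(&swc);
	if (swapcluster_allocslots(&swc) == 0 &&	/* reserve swap slots */
	    swapcluster_add(&swc, pg) == 0) {		/* assign one to pg */
		swapcluster_flush(&swc, false);		/* writes only if full */
	}
	/* ... more pages may be added on later iterations ... */
	swapcluster_flush(&swc, true);			/* final flush at the end */
#endif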

/*
 * uvmpd_dropswap: free any swap allocated to this page.
 *
 * => called with owner locked.
 * => return true if a page had an associated slot.
 */

static bool
uvmpd_dropswap(struct vm_page *pg)
{
	bool result = false;
	struct vm_anon *anon = pg->uanon;

	if ((pg->flags & PG_ANON) && anon->an_swslot) {
		uvm_swap_free(anon->an_swslot, 1);
		anon->an_swslot = 0;
		pg->flags &= ~PG_CLEAN;
		result = true;
	} else if (pg->flags & PG_AOBJ) {
		int slot = uao_set_swslot(pg->uobject,
		    pg->offset >> PAGE_SHIFT, 0);
		if (slot) {
			uvm_swap_free(slot, 1);
			pg->flags &= ~PG_CLEAN;
			result = true;
		}
	}

	return result;
}

/*
 * uvmpd_trydropswap: try to free any swap allocated to this page.
 *
 * => return true if a slot is successfully freed.
 * => page interlock must be held, and will be dropped.
 */

bool
uvmpd_trydropswap(struct vm_page *pg)
{
	kmutex_t *slock;
	bool result;

	if ((pg->flags & PG_BUSY) != 0) {
		mutex_exit(&pg->interlock);
		return false;
	}

	/*
	 * lock the page's owner.
	 * this will drop pg->interlock.
	 */

	slock = uvmpd_trylockowner(pg);
	if (slock == NULL) {
		return false;
	}

	/*
	 * skip this page if it's busy.
	 */

	if ((pg->flags & PG_BUSY) != 0) {
		mutex_exit(slock);
		return false;
	}

	result = uvmpd_dropswap(pg);

	mutex_exit(slock);

	return result;
}

#endif /* defined(VMSWAP) */

/*
 * uvmpd_scan_queue: scan a replace-candidate list for pages
 * to clean or free.
 *
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 */

static void
uvmpd_scan_queue(void)
{
	struct vm_page *p;
	struct uvm_object *uobj;
	struct vm_anon *anon;
#if defined(VMSWAP)
	struct swapcluster swc;
#endif /* defined(VMSWAP) */
	int dirtyreacts;
	kmutex_t *slock;
	UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);

	/*
	 * swslot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */

#if defined(VMSWAP)
	swapcluster_init(&swc);
#endif /* defined(VMSWAP) */

	dirtyreacts = 0;
	uvmpdpol_scaninit();

	while (/* CONSTCOND */ 1) {

		/*
		 * see if we've met the free target.
		 */

		if (uvmexp.free + uvmexp.paging
#if defined(VMSWAP)
		    + swapcluster_nused(&swc)
#endif /* defined(VMSWAP) */
		    >= uvmexp.freetarg << 2 ||
		    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
			UVMHIST_LOG(pdhist,"  met free target: "
			    "exit loop", 0, 0, 0, 0);
			break;
		}

		/*
		 * first we have the pdpolicy select a victim page
		 * and attempt to lock the object that the page
		 * belongs to.  if our attempt fails we skip on to
		 * the next page (no harm done).  it is important to
		 * "try" locking the object as we are locking in the
		 * wrong order (pageq -> object) and we don't want to
		 * deadlock.
		 *
		 * the only time we expect to see an ownerless page
		 * (i.e. a page with no uobject and !PG_ANON) is if an
		 * anon has loaned a page from a uvm_object and the
		 * uvm_object has dropped the ownership.  in that
		 * case, the anon can "take over" the loaned page
		 * and make it its own.
		 */

		p = uvmpdpol_selectvictim(&slock);
		if (p == NULL) {
			break;
		}
		KASSERT(uvmpdpol_pageisqueued_p(p));
		KASSERT(uvm_page_locked_p(p));
		KASSERT(p->wire_count == 0);

		/*
		 * we are below target and have a new page to consider.
		 */

		anon = p->uanon;
		uobj = p->uobject;

		if (p->flags & PG_BUSY) {
			mutex_exit(slock);
			uvmexp.pdbusy++;
			continue;
		}

		/* does the page belong to an object? */
		if (uobj != NULL) {
			uvmexp.pdobscan++;
		} else {
#if defined(VMSWAP)
			KASSERT(anon != NULL);
			uvmexp.pdanscan++;
#else /* defined(VMSWAP) */
			panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
		}


		/*
		 * we now have the object locked.
		 * if the page is not swap-backed, call the object's
		 * pager to flush and free the page.
		 */

#if defined(READAHEAD_STATS)
		if ((p->flags & PG_READAHEAD) != 0) {
			p->flags &= ~PG_READAHEAD;
			uvm_ra_miss.ev_count++;
		}
#endif /* defined(READAHEAD_STATS) */

		if ((p->flags & PG_SWAPBACKED) == 0) {
			KASSERT(uobj != NULL);
			(void) (uobj->pgops->pgo_put)(uobj, p->offset,
			    p->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
			continue;
		}

		/*
		 * the page is swap-backed.  remove all the permissions
		 * from the page so we can sync the modified info
		 * without any race conditions.  if the page is clean
		 * we can free it now and continue.
		 */

		pmap_page_protect(p, VM_PROT_NONE);
		if ((p->flags & PG_CLEAN) && pmap_clear_modify(p)) {
			p->flags &= ~(PG_CLEAN);
		}
		if (p->flags & PG_CLEAN) {
			int slot;
			int pageidx;

			pageidx = p->offset >> PAGE_SHIFT;
			uvm_pagefree(p);
			atomic_inc_uint(&uvmexp.pdfreed);

			/*
			 * for anons, we need to remove the page
			 * from the anon ourselves.  for aobjs,
			 * pagefree did that for us.
			 */

			if (anon) {
				KASSERT(anon->an_swslot != 0);
				anon->an_page = NULL;
				slot = anon->an_swslot;
			} else {
				slot = uao_find_swslot(uobj, pageidx);
			}
			if (slot > 0) {
				/* this page is now only in swap. */
				KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
				atomic_inc_uint(&uvmexp.swpgonly);
			}
			mutex_exit(slock);
			continue;
		}

#if defined(VMSWAP)
		/*
		 * this page is dirty, skip it if we'll have met our
		 * free target when all the current pageouts complete.
		 */

		if (uvmexp.free + uvmexp.paging > uvmexp.freetarg << 2) {
			mutex_exit(slock);
			continue;
		}

		/*
		 * free any swap space allocated to the page since
		 * we'll have to write it again with its new data.
		 */

		uvmpd_dropswap(p);

		/*
		 * start new swap pageout cluster (if necessary).
		 *
		 * if swap is full reactivate this page so that
		 * we eventually cycle all pages through the
		 * inactive queue.
		 */

		if (swapcluster_allocslots(&swc)) {
			dirtyreacts++;
			uvm_pageactivate(p);
			mutex_exit(slock);
			continue;
		}

		/*
		 * at this point, we're definitely going to reuse this
		 * page.  mark the page busy and delayed-free.
		 * we should remove the page from the page queues
		 * so we don't ever look at it again.
		 * adjust counters and such.
		 */

		p->flags |= PG_BUSY;
		UVM_PAGE_OWN(p, "scan_queue");
		p->flags |= PG_PAGEOUT;
		uvmexp.pgswapout++;

		uvm_pagedequeue(p);

		/*
		 * add the new page to the cluster.
		 */

		if (swapcluster_add(&swc, p)) {
			p->flags &= ~(PG_BUSY|PG_PAGEOUT);
			UVM_PAGE_OWN(p, NULL);
			dirtyreacts++;
			uvm_pageactivate(p);
			mutex_exit(slock);
			continue;
		}
		mutex_exit(slock);

		/*
		 * set the pageout in progress.  bump counters and set up
		 * for the next loop.
		 */

		swapcluster_flush(&swc, false);

#else /* defined(VMSWAP) */
		uvm_pageactivate(p);
		mutex_exit(slock);
#endif /* defined(VMSWAP) */
	}

#if defined(VMSWAP)
	swapcluster_flush(&swc, true);
#endif /* defined(VMSWAP) */
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 */

static void
uvmpd_scan(void)
{
	int swap_shortage, pages_freed;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;

	/*
	 * work on meeting our targets.  first we work on our free target
	 * by converting inactive pages into free pages.  then we work on
	 * meeting our inactive target by converting active pages to
	 * inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	pages_freed = uvmexp.pdfreed;
	uvmpd_scan_queue();
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */

	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse >= uvmexp.swpgavail &&
	    !uvm_swapisfull() &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}

	uvmpdpol_balancequeue(swap_shortage);

	/*
	 * if still below the minimum target, try unloading kernel
	 * modules.
	 */

	if (uvmexp.free < uvmexp.freemin) {
		module_thread_kick();
	}
}

/*
 * uvm_reclaimable: decide whether to wait for pagedaemon.
 *
 * => return true if it seems worthwhile to do uvm_wait.
 *
 * XXX should be tunable.
 * XXX should consider pools, etc?
 */

bool
uvm_reclaimable(void)
{
	int filepages;
	int active, inactive;

	/*
	 * if swap is not full, no problem.
	 */

	if (!uvm_swapisfull()) {
		return true;
	}

	/*
	 * file-backed pages can be reclaimed even when swap is full.
	 * if we have more than 1/16 of pageable memory or 5MB, try to reclaim.
	 *
	 * XXX assume the worst case, ie. all wired pages are file-backed.
	 *
	 * XXX should consider other reclaimable memory,
	 * XXX ie. pools and the traditional buffer cache.
	 */

	filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
	uvm_estimatepageable(&active, &inactive);
	if (filepages >= MIN((active + inactive) >> 4,
	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
		return true;
	}

	/*
	 * kill the process, fail allocation, etc..
	 */

	return false;
}
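
/*
 * Worked example (illustrative, not part of the original file): with 4 KiB
 * pages the 5MB limb of the MIN() above is 5 * 1024 * 1024 >> 12 = 1280
 * pages.  On a hypothetical machine with 256 MiB of pageable memory
 * (65536 pages), (active + inactive) >> 4 = 4096, so the effective
 * threshold is MIN(4096, 1280) = 1280 pages: uvm_reclaimable() keeps
 * returning true while at least ~5 MiB of file-backed pages remain.
 */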

void
uvm_estimatepageable(int *active, int *inactive)
{

	uvmpdpol_estimatepageable(active, inactive);
}


/*
 * Use a separate thread for draining pools.
 * This work can't be done from the main pagedaemon thread because
 * some pool allocators need to take vm_map locks.
 */

static void
uvmpd_pool_drain_thread(void *arg)
{
	int bufcnt;

	for (;;) {
		mutex_enter(&uvmpd_pool_drain_lock);
		if (!uvmpd_pool_drain_run) {
			cv_wait(&uvmpd_pool_drain_cv, &uvmpd_pool_drain_lock);
		}
		uvmpd_pool_drain_run = false;
		mutex_exit(&uvmpd_pool_drain_lock);

		/*
		 * kill unused metadata buffers.
		 */
		mutex_spin_enter(&uvm_fpageqlock);
		bufcnt = uvmexp.freetarg - uvmexp.free;
		mutex_spin_exit(&uvm_fpageqlock);
		if (bufcnt < 0)
			bufcnt = 0;

		mutex_enter(&bufcache_lock);
		buf_drain(bufcnt << PAGE_SHIFT);
		mutex_exit(&bufcache_lock);

		/*
		 * drain a pool.
		 */
		pool_drain(NULL);
	}
	/*NOTREACHED*/
}

static void
uvmpd_pool_drain_wakeup(void)
{

	mutex_enter(&uvmpd_pool_drain_lock);
	uvmpd_pool_drain_run = true;
	cv_signal(&uvmpd_pool_drain_cv);
	mutex_exit(&uvmpd_pool_drain_lock);
}