/*	$NetBSD: uvm_pdaemon.c,v 1.84.4.2 2007/04/09 22:10:09 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.84.4.2 2007/04/09 22:10:09 ad Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define UVMPD_NUMDIRTYREACTS    16


/*
 * local prototypes
 */

static void     uvmpd_scan(void);
static void     uvmpd_scan_queue(void);
static void     uvmpd_tune(void);

/*
 * XXX hack to avoid hangs when large processes fork.
 */
int uvm_extrapages;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
        int timo = 0;
        int s = splbio();

        /*
         * check for page daemon going to sleep (waiting for itself)
         */

        if (curlwp == uvm.pagedaemon_lwp && uvmexp.paging == 0) {
                /*
                 * now we have a problem: the pagedaemon wants to go to
                 * sleep until it frees more memory.  but how can it
                 * free more memory if it is asleep?  that is a deadlock.
                 * we have two options:
                 *  [1] panic now
                 *  [2] put a timeout on the sleep, thus causing the
                 *      pagedaemon to only pause (rather than sleep forever)
                 *
                 * note that option [2] will only help us if we get lucky
                 * and some other process on the system breaks the deadlock
                 * by exiting or freeing memory (thus allowing the pagedaemon
                 * to continue).  for now we panic if DEBUG is defined,
                 * otherwise we hope for the best with option [2] (better
                 * yet, this should never happen in the first place!).
                 */

                printf("pagedaemon: deadlock detected!\n");
                timo = hz >> 3;         /* set timeout */
#if defined(DEBUG)
                /* DEBUG: panic so we can debug it */
                panic("pagedaemon deadlock");
#endif
        }

        mutex_enter(&uvm_pagedaemon_lock);
        wakeup(&uvm.pagedaemon);        /* wake the daemon! */
        UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm_pagedaemon_lock, false, wmsg,
            timo);

        splx(s);
}
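
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * an allocation path that sleeps for the pagedaemon and then retries.
 * "uobj" and "off" are placeholders; no locks are held across the call,
 * per the rules above.
 *
 *      struct vm_page *pg;
 *
 *      while ((pg = uvm_pagealloc(uobj, off, NULL, 0)) == NULL) {
 *              uvm_wait("pgalloc");
 *      }
 */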

/*
 * uvm_kick_pdaemon: perform checks to determine if we need to
 * give the pagedaemon a nudge, and do so if necessary.
 */

void
uvm_kick_pdaemon(void)
{

        if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
            (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
             uvmpdpol_needsscan_p())) {
                wakeup(&uvm.pagedaemon);
        }
}
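
/*
 * Illustrative call site (hypothetical, for illustration only): a
 * page-consuming path can nudge the daemon before allocating, so a scan
 * can start before the free list drops all the way to freemin:
 *
 *      uvm_kick_pdaemon();
 *      pg = uvm_pagealloc(uobj, off, NULL, 0);
 */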

/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune(void)
{
        UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

        uvmexp.freemin = uvmexp.npages / 20;

        /* between 16k and 256k */
        /* XXX:  what are these values good for? */
        uvmexp.freemin = MAX(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
        uvmexp.freemin = MIN(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);

        /* Make sure there's always a user page free. */
        if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
                uvmexp.freemin = uvmexp.reserve_kernel + 1;

        uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
        if (uvmexp.freetarg <= uvmexp.freemin)
                uvmexp.freetarg = uvmexp.freemin + 1;

        uvmexp.freetarg += uvm_extrapages;
        uvm_extrapages = 0;

        uvmexp.wiredmax = uvmexp.npages / 3;
        UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
            uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}
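
/*
 * Worked example of the tuning above (assuming 4kB pages and a small
 * uvmexp.reserve_kernel): on a machine with 128MB of RAM, npages = 32768,
 * so npages / 20 = 1638, which is clamped to the 256kB ceiling of 64
 * pages; freetarg then becomes (64 * 4) / 3 = 85 pages, plus any
 * uvm_extrapages.
 */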

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
        int bufcnt, npages = 0;
        int extrapages = 0;
        UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

        UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

        /*
         * ensure correct priority and set paging parameters...
         */

        uvm.pagedaemon_lwp = curlwp;
        mutex_enter(&uvm_pageqlock);
        npages = uvmexp.npages;
        uvmpd_tune();
        mutex_exit(&uvm_pageqlock);

        /*
         * main loop
         */

        for (;;) {
                mutex_enter(&uvm_pagedaemon_lock);

                UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
                UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
                    &uvm_pagedaemon_lock, false, "pgdaemon", 0);
                uvmexp.pdwoke++;
                UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

                /*
                 * now lock page queues and recompute inactive count
                 */

                mutex_enter(&uvm_pageqlock);
                if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
                        npages = uvmexp.npages;
                        extrapages = uvm_extrapages;
                        uvmpd_tune();
                }

                uvmpdpol_tune();

                /*
                 * estimate a hint.  note that bufmem is returned to
                 * the system only when an entire pool page is empty.
                 */
                bufcnt = uvmexp.freetarg - uvmexp.free;
                if (bufcnt < 0)
                        bufcnt = 0;

                UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d",
                    uvmexp.free, uvmexp.freetarg, 0,0);

                /*
                 * scan if needed
                 */

                if (uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
                    uvmpdpol_needsscan_p()) {
                        uvmpd_scan();
                }

                /*
                 * if there's any free memory to be had,
                 * wake up any waiters.
                 */

                if (uvmexp.free > uvmexp.reserve_kernel ||
                    uvmexp.paging == 0) {
                        wakeup(&uvmexp.free);
                }

                /*
                 * scan done.  unlock page queues (the only lock we are holding)
                 */

                mutex_exit(&uvm_pageqlock);

                buf_drain(bufcnt << PAGE_SHIFT);

                /*
                 * drain pool resources now that we're not holding any locks
                 */

                pool_drain(0);

                /*
                 * free any cached u-areas we don't need
                 */
                uvm_uarea_drain(true);

        }
        /*NOTREACHED*/
}


/*
 * uvm_aiodone_worker: a workqueue callback for the aiodone daemon.
 */

void
uvm_aiodone_worker(struct work *wk, void *dummy)
{
        struct buf *bp = (void *)wk;
        int free;

        KASSERT(&bp->b_work == wk);

        /*
         * process an i/o that's done.
         */

        free = uvmexp.free;
        (*bp->b_iodone)(bp);
        if (free <= uvmexp.reserve_kernel) {
                mutex_enter(&uvm_fpageqlock);
                wakeup(&uvm.pagedaemon);
                mutex_exit(&uvm_fpageqlock);
        } else {
                mutex_enter(&uvm_pagedaemon_lock);
                wakeup(&uvmexp.free);
                mutex_exit(&uvm_pagedaemon_lock);
        }
}

/*
 * uvmpd_trylockowner: trylock the page's owner.
 *
 * => called with pageq locked.
 * => resolve orphaned O->A loaned page.
 * => return the locked mutex on success.  otherwise, return NULL.
 */

kmutex_t *
uvmpd_trylockowner(struct vm_page *pg)
{
        struct uvm_object *uobj = pg->uobject;
        kmutex_t *slock;

        KASSERT(mutex_owned(&uvm_pageqlock));

        if (uobj != NULL) {
                slock = &uobj->vmobjlock;
        } else {
                struct vm_anon *anon = pg->uanon;

                KASSERT(anon != NULL);
                slock = &anon->an_lock;
        }

        if (!mutex_tryenter(slock)) {
                return NULL;
        }

        if (uobj == NULL) {

                /*
                 * set PQ_ANON if it isn't set already.
                 */

                if ((pg->pqflags & PQ_ANON) == 0) {
                        KASSERT(pg->loan_count > 0);
                        pg->loan_count--;
                        pg->pqflags |= PQ_ANON;
                        /* anon now owns it */
                }
        }

        return slock;
}
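
/*
 * Typical usage sketch (cf. uvmpd_trydropswap() below), assuming the
 * caller already holds uvm_pageqlock:
 *
 *      kmutex_t *slock;
 *
 *      slock = uvmpd_trylockowner(pg);
 *      if (slock == NULL)
 *              return;         (owner is contended; skip this page)
 *      ... inspect or modify pg under the owner's lock ...
 *      mutex_exit(slock);
 */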

#if defined(VMSWAP)
struct swapcluster {
        int swc_slot;
        int swc_nallocated;
        int swc_nused;
        struct vm_page *swc_pages[howmany(MAXPHYS, MIN_PAGE_SIZE)];
};

static void
swapcluster_init(struct swapcluster *swc)
{

        swc->swc_slot = 0;
}

static int
swapcluster_allocslots(struct swapcluster *swc)
{
        int slot;
        int npages;

        if (swc->swc_slot != 0) {
                return 0;
        }

        /*
         * even with a strange MAXPHYS, the shift implicitly rounds the
         * cluster down to a whole number of pages (e.g. a 64kB MAXPHYS
         * with 4kB pages yields a 16 page cluster).
         */
        npages = MAXPHYS >> PAGE_SHIFT;
        slot = uvm_swap_alloc(&npages, true);
        if (slot == 0) {
                return ENOMEM;
        }
        swc->swc_slot = slot;
        swc->swc_nallocated = npages;
        swc->swc_nused = 0;

        return 0;
}

static int
swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
{
        int slot;
        struct uvm_object *uobj;

        KASSERT(swc->swc_slot != 0);
        KASSERT(swc->swc_nused < swc->swc_nallocated);
        KASSERT((pg->pqflags & PQ_SWAPBACKED) != 0);

        slot = swc->swc_slot + swc->swc_nused;
        uobj = pg->uobject;
        if (uobj == NULL) {
                KASSERT(mutex_owned(&pg->uanon->an_lock));
                pg->uanon->an_swslot = slot;
        } else {
                int result;

                KASSERT(mutex_owned(&uobj->vmobjlock));
                result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
                if (result == -1) {
                        return ENOMEM;
                }
        }
        swc->swc_pages[swc->swc_nused] = pg;
        swc->swc_nused++;

        return 0;
}

static void
swapcluster_flush(struct swapcluster *swc, bool now)
{
        int slot;
        int nused;
        int nallocated;
        int error;

        if (swc->swc_slot == 0) {
                return;
        }
        KASSERT(swc->swc_nused <= swc->swc_nallocated);

        slot = swc->swc_slot;
        nused = swc->swc_nused;
        nallocated = swc->swc_nallocated;

        /*
         * if this is the final pageout we could have a few
         * unused swap blocks.  if so, free them now.
         */

        if (nused < nallocated) {
                if (!now) {
                        return;
                }
                uvm_swap_free(slot + nused, nallocated - nused);
        }

        /*
         * now start the pageout.
         */

        uvmexp.pdpageouts++;
        error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
        KASSERT(error == 0);

        /*
         * zero swslot to indicate that we are
         * no longer building a swap-backed cluster.
         */

        swc->swc_slot = 0;
}
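
/*
 * Sketch of the swapcluster life cycle as used by uvmpd_scan_queue()
 * below (error handling elided; "pg" is a swap-backed page whose owner
 * is locked):
 *
 *      struct swapcluster swc;
 *
 *      swapcluster_init(&swc);
 *      ...
 *      if (swapcluster_allocslots(&swc) == 0 &&
 *          swapcluster_add(&swc, pg) == 0)
 *              swapcluster_flush(&swc, false); (no-op until cluster fills)
 *      ...
 *      swapcluster_flush(&swc, true);  (final: frees unused slots, pages out)
 */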

/*
 * uvmpd_dropswap: free any swap allocated to this page.
 *
 * => called with owner locked.
 * => return true if a page had an associated slot.
 */

static bool
uvmpd_dropswap(struct vm_page *pg)
{
        bool result = false;
        struct vm_anon *anon = pg->uanon;

        if ((pg->pqflags & PQ_ANON) && anon->an_swslot) {
                uvm_swap_free(anon->an_swslot, 1);
                anon->an_swslot = 0;
                pg->flags &= ~PG_CLEAN;
                result = true;
        } else if (pg->pqflags & PQ_AOBJ) {
                int slot = uao_set_swslot(pg->uobject,
                    pg->offset >> PAGE_SHIFT, 0);
                if (slot) {
                        uvm_swap_free(slot, 1);
                        pg->flags &= ~PG_CLEAN;
                        result = true;
                }
        }

        return result;
}

/*
 * uvmpd_trydropswap: try to free any swap allocated to this page.
 *
 * => return true if a slot is successfully freed.
 */

bool
uvmpd_trydropswap(struct vm_page *pg)
{
        kmutex_t *slock;
        bool result;

        if ((pg->flags & PG_BUSY) != 0) {
                return false;
        }

        /*
         * lock the page's owner.
         */

        slock = uvmpd_trylockowner(pg);
        if (slock == NULL) {
                return false;
        }

        /*
         * skip this page if it's busy.
         */

        if ((pg->flags & PG_BUSY) != 0) {
                mutex_exit(slock);
                return false;
        }

        result = uvmpd_dropswap(pg);

        mutex_exit(slock);

        return result;
}

#endif /* defined(VMSWAP) */

/*
 * uvmpd_scan_queue: scan a replace-candidate list for pages
 * to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 */

static void
uvmpd_scan_queue(void)
{
        struct vm_page *p;
        struct uvm_object *uobj;
        struct vm_anon *anon;
#if defined(VMSWAP)
        struct swapcluster swc;
#endif /* defined(VMSWAP) */
        int dirtyreacts;
        kmutex_t *slock;
        UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);

        /*
         * swc_slot is non-zero if we are building a swap cluster.  we want
         * to stay in the loop while we have a page to scan or we have
         * a swap-cluster to build.
         */

#if defined(VMSWAP)
        swapcluster_init(&swc);
#endif /* defined(VMSWAP) */

        dirtyreacts = 0;
        uvmpdpol_scaninit();

        while (/* CONSTCOND */ 1) {

                /*
                 * see if we've met the free target.
                 */

                if (uvmexp.free + uvmexp.paging >= uvmexp.freetarg << 2 ||
                    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
                        UVMHIST_LOG(pdhist,"  met free target: "
                            "exit loop", 0, 0, 0, 0);
                        break;
                }

                p = uvmpdpol_selectvictim();
                if (p == NULL) {
                        break;
                }
                KASSERT(uvmpdpol_pageisqueued_p(p));
                KASSERT(p->wire_count == 0);

                /*
                 * we are below target and have a new page to consider.
                 */

                anon = p->uanon;
                uobj = p->uobject;

                /*
                 * first we attempt to lock the object that this page
                 * belongs to.  if our attempt fails we skip on to
                 * the next page (no harm done).  it is important to
                 * "try" locking the object as we are locking in the
                 * wrong order (pageq -> object) and we don't want to
                 * deadlock.
                 *
                 * the only time we expect to see an ownerless page
                 * (i.e. a page with no uobject and !PQ_ANON) is if an
                 * anon has loaned a page from a uvm_object and the
                 * uvm_object has dropped the ownership.  in that
                 * case, the anon can "take over" the loaned page
                 * and make it its own.
                 */

                slock = uvmpd_trylockowner(p);
                if (slock == NULL) {
                        continue;
                }
                if (p->flags & PG_BUSY) {
                        mutex_exit(slock);
                        uvmexp.pdbusy++;
                        continue;
                }

                /* does the page belong to an object? */
                if (uobj != NULL) {
                        uvmexp.pdobscan++;
                } else {
#if defined(VMSWAP)
                        KASSERT(anon != NULL);
                        uvmexp.pdanscan++;
#else /* defined(VMSWAP) */
                        panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
                }

                /*
                 * we now have the object and the page queues locked.
                 * if the page is not swap-backed, call the object's
                 * pager to flush and free the page.
                 */

#if defined(READAHEAD_STATS)
                if ((p->pqflags & PQ_READAHEAD) != 0) {
                        p->pqflags &= ~PQ_READAHEAD;
                        uvm_ra_miss.ev_count++;
                }
#endif /* defined(READAHEAD_STATS) */

                if ((p->pqflags & PQ_SWAPBACKED) == 0) {
                        KASSERT(uobj != NULL);
                        mutex_exit(&uvm_pageqlock);
                        (void) (uobj->pgops->pgo_put)(uobj, p->offset,
                            p->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
                        mutex_enter(&uvm_pageqlock);
                        continue;
                }

                /*
                 * the page is swap-backed.  remove all the permissions
                 * from the page so we can sync the modified info
                 * without any race conditions.  if the page is clean
                 * we can free it now and continue.
                 */

                pmap_page_protect(p, VM_PROT_NONE);
                if ((p->flags & PG_CLEAN) && pmap_clear_modify(p)) {
                        p->flags &= ~(PG_CLEAN);
                }
                if (p->flags & PG_CLEAN) {
                        int slot;
                        int pageidx;

                        pageidx = p->offset >> PAGE_SHIFT;
                        uvm_pagefree(p);
                        uvmexp.pdfreed++;

                        /*
                         * for anons, we need to remove the page
                         * from the anon ourselves.  for aobjs,
                         * pagefree did that for us.
                         */

                        if (anon) {
                                KASSERT(anon->an_swslot != 0);
                                anon->an_page = NULL;
                                slot = anon->an_swslot;
                        } else {
                                slot = uao_find_swslot(uobj, pageidx);
                        }
                        mutex_exit(slock);

                        if (slot > 0) {
                                /* this page is now only in swap. */
                                mutex_enter(&uvm_swap_data_lock);
                                KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
                                uvmexp.swpgonly++;
                                mutex_exit(&uvm_swap_data_lock);
                        }
                        continue;
                }

#if defined(VMSWAP)
                /*
                 * this page is dirty, skip it if we'll have met our
                 * free target when all the current pageouts complete.
                 */

                if (uvmexp.free + uvmexp.paging > uvmexp.freetarg << 2) {
                        mutex_exit(slock);
                        continue;
                }

                /*
                 * free any swap space allocated to the page since
                 * we'll have to write it again with its new data.
                 */

                uvmpd_dropswap(p);

                /*
                 * if all pages in swap are only in swap,
                 * the swap space is full and we can't page out
                 * any more swap-backed pages.  reactivate this page
                 * so that we eventually cycle all pages through
                 * the inactive queue.
                 */

                if (uvm_swapisfull()) {
                        dirtyreacts++;
                        uvm_pageactivate(p);
                        mutex_exit(slock);
                        continue;
                }

                /*
                 * start new swap pageout cluster (if necessary).
                 */

                if (swapcluster_allocslots(&swc)) {
                        mutex_exit(slock);
                        dirtyreacts++; /* XXX */
                        continue;
                }

                /*
                 * at this point, we're definitely going to reuse this
                 * page.  mark the page busy and delayed-free.
                 * we should remove the page from the page queues
                 * so we don't ever look at it again.
                 * adjust counters and such.
                 */

                p->flags |= PG_BUSY;
                UVM_PAGE_OWN(p, "scan_queue");

                p->flags |= PG_PAGEOUT;
                uvmexp.paging++;
                uvm_pagedequeue(p);

                uvmexp.pgswapout++;
                mutex_exit(&uvm_pageqlock);

                /*
                 * add the new page to the cluster.
                 */

                if (swapcluster_add(&swc, p)) {
                        p->flags &= ~(PG_BUSY|PG_PAGEOUT);
                        UVM_PAGE_OWN(p, NULL);
                        mutex_enter(&uvm_pageqlock);
                        uvmexp.paging--;
                        dirtyreacts++;
                        uvm_pageactivate(p);
                        mutex_exit(slock);
                        continue;
                }
                mutex_exit(slock);

                swapcluster_flush(&swc, false);
                mutex_enter(&uvm_pageqlock);

                /*
                 * the pageout is in progress.  bump counters and set up
                 * for the next loop.
                 */

                uvmexp.pdpending++;

#else /* defined(VMSWAP) */
                uvm_pageactivate(p);
                mutex_exit(slock);
#endif /* defined(VMSWAP) */
        }

#if defined(VMSWAP)
        mutex_exit(&uvm_pageqlock);
        swapcluster_flush(&swc, true);
        mutex_enter(&uvm_pageqlock);
#endif /* defined(VMSWAP) */
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

static void
uvmpd_scan(void)
{
        int swap_shortage, pages_freed;
        UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

        uvmexp.pdrevs++;

#ifndef __SWAP_BROKEN

        /*
         * swap out some processes if we are below our free target.
         * we need to unlock the page queues for this.
         */

        if (uvmexp.free < uvmexp.freetarg && uvmexp.nswapdev != 0) {
                uvmexp.pdswout++;
                UVMHIST_LOG(pdhist,"  free %d < target %d: swapout",
                    uvmexp.free, uvmexp.freetarg, 0, 0);
                mutex_exit(&uvm_pageqlock);
                uvm_swapout_threads();
                mutex_enter(&uvm_pageqlock);

        }
#endif

        /*
         * now we want to work on meeting our targets.  first we work on our
         * free target by converting inactive pages into free pages.  then
         * we work on meeting our inactive target by converting active pages
         * to inactive ones.
         */

        UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

        pages_freed = uvmexp.pdfreed;
        uvmpd_scan_queue();
        pages_freed = uvmexp.pdfreed - pages_freed;

        /*
         * detect if we're not going to be able to page anything out
         * until we free some swap resources from active pages.
         */

        swap_shortage = 0;
        if (uvmexp.free < uvmexp.freetarg &&
            uvmexp.swpginuse >= uvmexp.swpgavail &&
            !uvm_swapisfull() &&
            pages_freed == 0) {
                swap_shortage = uvmexp.freetarg - uvmexp.free;
        }

        uvmpdpol_balancequeue(swap_shortage);
}

/*
 * uvm_reclaimable: decide whether to wait for pagedaemon.
 *
 * => return true if it seems to be worth doing uvm_wait.
 *
 * XXX should be tunable.
 * XXX should consider pools, etc?
 */

bool
uvm_reclaimable(void)
{
        int filepages;
        int active, inactive;

        /*
         * if swap is not full, no problem.
         */

        if (!uvm_swapisfull()) {
                return true;
        }

        /*
         * file-backed pages can be reclaimed even when swap is full.
         * if we have more than 1/16 of pageable memory or 5MB (whichever
         * is smaller), try to reclaim.
         *
         * XXX assume the worst case, ie. all wired pages are file-backed.
         *
         * XXX should consider other reclaimable memory.
         * XXX ie. pools, traditional buffer cache.
         */

        filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
        uvm_estimatepageable(&active, &inactive);
        if (filepages >= MIN((active + inactive) >> 4,
            5 * 1024 * 1024 >> PAGE_SHIFT)) {
                return true;
        }

        /*
         * kill the process, fail allocation, etc..
         */

        return false;
}
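
/*
 * Worked example of the threshold above (assuming 4kB pages): with
 * 256MB pageable (active + inactive = 65536 pages), the 1/16 share is
 * 4096 pages (16MB), so the 5MB cap of 1280 pages wins; roughly 5MB of
 * file-backed pages is then enough to keep waiting for the pagedaemon.
 */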

void
uvm_estimatepageable(int *active, int *inactive)
{

        uvmpdpol_estimatepageable(active, inactive);
}