/*	$NetBSD: uvm_pager.c,v 1.105 2011/09/28 22:52:15 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.105 2011/09/28 22:52:15 matt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"
#include "opt_pagermap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/buf.h>

#include <uvm/uvm.h>

/*
 * XXX
 * this is needed until the device strategy interface
 * is changed to do physically-addressed i/o.
 */

#ifndef PAGER_MAP_DEFAULT_SIZE
#define PAGER_MAP_DEFAULT_SIZE	(16 * 1024 * 1024)
#endif

#ifndef PAGER_MAP_SIZE
#define PAGER_MAP_SIZE	PAGER_MAP_DEFAULT_SIZE
#endif

size_t pager_map_size = PAGER_MAP_SIZE;
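
/*
 * note on sizing (illustrative, the value below is an example only):
 * since the #ifndef guards above defer to "opt_pagermap.h", the pager
 * map can be sized from the kernel configuration file, along the lines
 * of:
 *
 *	options 	PAGER_MAP_SIZE=0x2000000	# 32MB of pager KVA
 *
 * without such an option, pager_map_size defaults to
 * PAGER_MAP_DEFAULT_SIZE (16MB) above.
 */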

/*
 * list of uvm pagers in the system
 */

const struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock;
bool pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static int emerg_ncolors;
static bool emerginuse;
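
/*
 * uvm_pager_realloc_emerg: allocate (or grow) the emergency KVA region.
 *
 * emergva is a MAXPHYS-sized fallback region, plus one page per color
 * so that a color-matched starting address can be chosen, which
 * uvm_pagermapin() hands to the pagedaemon when pager_map is exhausted.
 * this lets pageouts make progress instead of deadlocking.  called
 * again if uvmexp.ncolors has grown, so the region always covers all
 * page colors.
 */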
void
uvm_pager_realloc_emerg(void)
{
	vaddr_t new_emergva, old_emergva;
	int old_emerg_ncolors;

	if (__predict_true(emergva != 0 && emerg_ncolors >= uvmexp.ncolors))
		return;

	KASSERT(!emerginuse);

	new_emergva = uvm_km_alloc(kernel_map,
	    round_page(MAXPHYS) + ptoa(uvmexp.ncolors), 0,
	    UVM_KMF_VAONLY);

	KASSERT(new_emergva != 0);

	old_emergva = emergva;
	old_emerg_ncolors = emerg_ncolors;

	/*
	 * don't support re-color in late boot anyway.
	 */
	if (0) /* XXX */
		mutex_enter(&pager_map_wanted_lock);

	emergva = new_emergva;
	emerg_ncolors = uvmexp.ncolors;
	wakeup(&old_emergva);

	if (0) /* XXX */
		mutex_exit(&pager_map_wanted_lock);

	if (old_emergva)
		uvm_km_free(kernel_map, old_emergva,
		    round_page(MAXPHYS) + ptoa(old_emerg_ncolors),
		    UVM_KMF_VAONLY);
}

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, pager_map_size, 0,
	    false, NULL);
	mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
	pager_map_wanted = false;

	uvm_pager_realloc_emerg();

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < __arraycount(uvmpagerops); lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = (curlwp == uvm.pagedaemon_lwp);
	const u_int first_color = VM_PGCOLOR_BUCKET(*pps);
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, first_color=%u)",
	    pps, npages, first_color, 0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

 ReStart:
	size = ptoa(npages);
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET,
	    first_color, UVM_FLAG_COLORMATCH | UVM_FLAG_NOMERGE
	    | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
		if (pdaemon) {
			mutex_enter(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, false,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = true;
			mutex_exit(&pager_map_wanted_lock);
			kva = emergva + ptoa(first_color);
			/* The shift implicitly truncates to PAGE_SIZE */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		mutex_enter(&pager_map_wanted_lock);
		pager_map_wanted = true;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, false,
		    "pager_map", 0);
		goto ReStart;
	}

 enter:
	/* got it */
	for (cva = kva; npages != 0; npages--, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		// KASSERT(!((VM_PAGE_TO_PHYS(pp) ^ cva) & uvmexp.colormask));
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot, 0);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the pmap-level mappings by hand and then remove the map
 * entry itself (waking up anyone waiting for space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = ptoa(npages);
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, size);
	pmap_update(pmap_kernel());

	if ((kva & ~ptoa(uvmexp.colormask)) == emergva) {
		mutex_enter(&pager_map_wanted_lock);
		emerginuse = false;
		wakeup(&emergva);
		mutex_exit(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
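
/*
 * illustrative usage sketch (an assumption, not code taken from this
 * file): a caller that needs a kernel mapping around an i/o brackets
 * the transfer with uvm_pagermapin() and uvm_pagermapout().  "pgs",
 * "npages" and the device i/o step below are hypothetical:
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_READ);
 *	if (kva == 0)
 *		return ENOMEM;		<- only possible without WAITOK
 *	... device reads into [kva, kva + ptoa(npages)) ...
 *	uvm_pagermapout(kva, npages);
 *
 * each page must already be PG_BUSY (asserted in the loop above), and
 * UVMPAGER_MAPIN_READ asks for a writable mapping because "read" here
 * means device-to-memory; pass UVMPAGER_MAPIN_WAITOK to sleep instead
 * of failing when pager_map is full.
 */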

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 */

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	workqueue_enqueue(uvm.aiodone_queue, &bp->b_work, NULL);
}
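
/*
 * uvm_aio_aiodone_pages: finish i/o on a batch of pages, handling
 * errors, pagedaemon accounting, and the object vs. swap cases.  for
 * object pages a single object lock covers the whole batch; for swap
 * i/os each page may have a different owner (aobj or anon), so each
 * page is locked individually inside the loop.
 */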
void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj;
	struct vm_page *pg;
	kmutex_t *slock;
	int pageout_done;
	int swslot;
	int i;
	bool swap;
	UVMHIST_FUNC("uvm_aio_aiodone_pages"); UVMHIST_CALLED(ubchist);

	swslot = 0;
	pageout_done = 0;
	slock = NULL;
	uobj = NULL;
	pg = pgs[0];
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
	    (pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = uobj->vmobjlock;
		mutex_enter(slock);
		mutex_enter(&uvm_pageqlock);
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
#if defined(VMSWAP)
		bool anon_disposed = false; /* XXX gcc */
#endif /* defined(VMSWAP) */

		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = pg->uobject->vmobjlock;
			} else {
				slock = pg->uanon->an_lock;
			}
			mutex_enter(slock);
			mutex_enter(&uvm_pageqlock);
			anon_disposed = (pg->flags & PG_RELEASED) != 0;
			KASSERT(!anon_disposed || pg->uobject != NULL ||
			    pg->uanon->an_ref == 0);
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					pageout_done++;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
					    pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
					    swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->pqflags |= PQ_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT((pg->flags & PG_CLEAN) != 0);
			uvm_pageenqueue(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			pageout_done++;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && anon_disposed) {
				mutex_exit(&uvm_pageqlock);
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	uvm_pageout_done(pageout_done);
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		mutex_exit(&uvm_swap_data_lock);
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pgs[npages];
	int i, error;
	bool write;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = bp->b_error;
	write = (bp->b_flags & B_READ) == 0;

	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	uvm_aio_aiodone_pages(pgs, npages, write, error);

	if (write && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}
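
/*
 * illustrative sketch (assumptions, not code from this file): an async
 * pager i/o reaches uvm_aio_aiodone() by mapping its pages, pointing
 * b_iodone at uvm_aio_biodone and issuing the i/o; biodone(9) then runs
 * that handler in interrupt context, which defers the real work to the
 * aiodone workqueue.  roughly:
 *
 *	bp = getiobuf(vp, true);
 *	bp->b_data = (void *)uvm_pagermapin(pgs, npages,
 *	    UVMPAGER_MAPIN_WAITOK);
 *	bp->b_bufsize = bp->b_bcount = ptoa(npages);
 *	bp->b_iodone = uvm_aio_biodone;
 *	... set b_blkno etc., then VOP_STRATEGY(vp, bp) ...
 *
 * the exact buf setup varies by pager (see e.g. genfs_do_io() for the
 * vnode pager); the sketch only shows how b_iodone ties these handlers
 * together.
 */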

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}