/*	$NetBSD: uvm_pager.c,v 1.104 2011/09/01 06:40:28 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.104 2011/09/01 06:40:28 matt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"
#include "opt_pagermap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/buf.h>

#include <uvm/uvm.h>

/*
 * XXX
 * this is needed until the device strategy interface
 * is changed to do physically-addressed i/o.
 */

#ifndef PAGER_MAP_DEFAULT_SIZE
#define PAGER_MAP_DEFAULT_SIZE	(16 * 1024 * 1024)
#endif

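/*
 * PAGER_MAP_SIZE may be overridden from the kernel config file; any
 * such override is delivered through opt_pagermap.h, included above.
 */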
#ifndef PAGER_MAP_SIZE
#define PAGER_MAP_SIZE	PAGER_MAP_DEFAULT_SIZE
#endif

size_t pager_map_size = PAGER_MAP_SIZE;

/*
 * list of uvm pagers in the system
 */

const struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock;
bool pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static bool emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, pager_map_size, 0,
	    false, NULL);
	mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
	pager_map_wanted = false;
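
	/*
	 * allocate an emergency VA range for the pagedaemon (see
	 * uvm_pagermapin() below).  it is sized for a MAXPHYS-sized i/o
	 * plus one extra page per page color, so the mapping can be
	 * offset to start at whatever color the first page has.
	 */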
	emergva = uvm_km_alloc(kernel_map,
	    round_page(MAXPHYS) + ptoa(uvmexp.ncolors), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = false;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < __arraycount(uvmpagerops); lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */
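
/*
 * typical caller pattern, as a rough sketch (see uvm_pager.h for the
 * UVMPAGER_MAPIN_* flag definitions):
 *
 *	kva = uvm_pagermapin(pgs, npages,
 *	    UVMPAGER_MAPIN_WAITOK | UVMPAGER_MAPIN_READ);
 *	... start i/o on the buffer mapped at kva ...
 *	uvm_pagermapout(kva, npages);
 */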

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = (curlwp == uvm.pagedaemon_lwp);
	const u_int first_color = VM_PGCOLOR_BUCKET(*pps);
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, first_color=%u)",
	    pps, npages, first_color, 0);

	/*
	 * compute protection.  outgoing I/O (a pageout: the device reads
	 * the page) only needs read access to the page, whereas incoming
	 * I/O (a pagein: the device fills the page) needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

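	/*
	 * note that the pagedaemon must never sleep waiting for pager_map
	 * space: freeing that space may itself require pagedaemon service,
	 * so sleeping here could deadlock.  instead the daemon falls back
	 * to the emergency VA range reserved at init time.
	 */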
ReStart:
	size = ptoa(npages);
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET,
	    first_color, UVM_FLAG_COLORMATCH | UVM_FLAG_NOMERGE
	    | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
		if (pdaemon) {
			mutex_enter(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, false,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = true;
			mutex_exit(&pager_map_wanted_lock);
			kva = emergva + ptoa(first_color);
			/* the shift rounds MAXPHYS down to whole pages */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		mutex_enter(&pager_map_wanted_lock);
		pager_map_wanted = true;
		UVMHIST_LOG(maphist, " SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, false,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva; npages != 0; npages--, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		/* KASSERT(!((VM_PAGE_TO_PHYS(pp) ^ cva) & uvmexp.colormask)); */
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot, 0);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the mappings by hand and then remove the map entry itself
 * (waking up anyone waiting for space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = ptoa(npages);
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, size);
	pmap_update(pmap_kernel());

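	/*
	 * masking off the low "color" bits recovers the base of the
	 * emergency range, since uvm_pagermapin() may have offset the
	 * emergency mapping by ptoa(first_color).
	 */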
	if ((kva & ~ptoa(uvmexp.colormask)) == emergva) {
		mutex_enter(&pager_map_wanted_lock);
		emerginuse = false;
		wakeup(&emergva);
		mutex_exit(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 */

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	workqueue_enqueue(uvm.aiodone_queue, &bp->b_work, NULL);
}

void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj;
	struct vm_page *pg;
	kmutex_t *slock;
	int pageout_done;
	int swslot;
	int i;
	bool swap;
	UVMHIST_FUNC("uvm_aio_aiodone_pages"); UVMHIST_CALLED(ubchist);

	swslot = 0;
	pageout_done = 0;
	slock = NULL;
	uobj = NULL;
	pg = pgs[0];
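
	/*
	 * swap i/o is identified by a first page that belongs to an anon
	 * or to an aobj.  object pages all share the owning object's
	 * lock, while swap-backed pages may each need a different lock
	 * and are therefore locked page by page in the loop below.
	 */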
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = uobj->vmobjlock;
		mutex_enter(slock);
		mutex_enter(&uvm_pageqlock);
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
#if defined(VMSWAP)
		bool anon_disposed = false; /* XXX gcc */
#endif /* defined(VMSWAP) */

		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = pg->uobject->vmobjlock;
			} else {
				slock = pg->uanon->an_lock;
			}
			mutex_enter(slock);
			mutex_enter(&uvm_pageqlock);
			anon_disposed = (pg->flags & PG_RELEASED) != 0;
			KASSERT(!anon_disposed || pg->uobject != NULL ||
			    pg->uanon->an_ref == 0);
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					pageout_done++;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
					    pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
					    swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->pqflags |= PQ_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT((pg->flags & PG_CLEAN) != 0);
			uvm_pageenqueue(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			pageout_done++;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && anon_disposed) {
				mutex_exit(&uvm_pageqlock);
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	uvm_pageout_done(pageout_done);
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		mutex_exit(&uvm_swap_data_lock);
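
		/*
		 * on a permanent write error the data is lost, so mark
		 * the swap slots bad to keep their stale contents from
		 * ever being used; on ENOMEM the pages remain in memory
		 * and will be retried, so just free the slots.
		 */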
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

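/*
 * note that pgs[] below is a variable-length array on the kernel stack;
 * its size stays bounded because a pager mapping covers at most MAXPHYS
 * bytes, i.e. npages <= MAXPHYS >> PAGE_SHIFT.
 */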
void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pgs[npages];
	int i, error;
	bool write;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = bp->b_error;
	write = (bp->b_flags & B_READ) == 0;

	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	uvm_aio_aiodone_pages(pgs, npages, write, error);

	if (write && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}