/*	$NetBSD: uvm_pager.c,v 1.87 2007/10/25 13:03:06 yamt Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.87 2007/10/25 13:03:06 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"
#include "opt_pagermap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * XXX
 * this is needed until the device strategy interface
 * is changed to do physically-addressed i/o.
 */

#ifndef PAGER_MAP_DEFAULT_SIZE
#define	PAGER_MAP_DEFAULT_SIZE	(16 * 1024 * 1024)
#endif

#ifndef PAGER_MAP_SIZE
#define	PAGER_MAP_SIZE	PAGER_MAP_DEFAULT_SIZE
#endif
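
/*
 * PAGER_MAP_SIZE can be overridden at kernel build time through
 * opt_pagermap.h, e.g. with a kernel config line along these lines
 * (a sketch; exact quoting depends on config(1)):
 *
 *	options 	PAGER_MAP_SIZE="(32*1024*1024)"
 */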

size_t pager_map_size = PAGER_MAP_SIZE;

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock;
bool pager_map_wanted;			/* locked by pager map */
static vaddr_t emergva;
static bool emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, pager_map_size, 0,
	    false, NULL);
	mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
	pager_map_wanted = false;
	emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = false;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}


/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */
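
/*
 * Typical caller pattern (a sketch, not verbatim from any one pager):
 *
 *	kva = uvm_pagermapin(pgs, npages,
 *	    UVMPAGER_MAPIN_WAITOK | UVMPAGER_MAPIN_READ);
 *	... perform device i/o on [kva, kva + (npages << PAGE_SHIFT)) ...
 *	uvm_pagermapout(kva, npages);
 *
 * note that UVMPAGER_MAPIN_READ means the i/o reads *into* the pages
 * (a pagein), so the temporary mapping must be writable; pageouts only
 * need a read-only mapping.
 */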

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = curlwp == uvm.pagedaemon_lwp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_FLAG_NOMERGE | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
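		/*
		 * no KVA free in pager_map right now.  the pagedaemon
		 * must not sleep waiting for space here: it may be the
		 * only thing that can free pages, so blocking it on
		 * pager_map could deadlock the system.  instead it falls
		 * back to a reserved emergency VA range (emergva) big
		 * enough for one MAXPHYS-sized transfer.
		 */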
		if (pdaemon) {
			mutex_enter(&pager_map_wanted_lock);
			if (emerginuse) {
				mtsleep(&emergva, PVM | PNORELOCK, "emergva",
				    0, &pager_map_wanted_lock);
				goto ReStart;
			}
			emerginuse = true;
			mutex_exit(&pager_map_wanted_lock);
			kva = emergva;
			/* The shift implicitly truncates to PAGE_SIZE */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		mutex_enter(&pager_map_wanted_lock);
		pager_map_wanted = true;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		mtsleep(pager_map, PVM | PNORELOCK, "pager_map", 0,
		    &pager_map_wanted_lock);
		goto ReStart;
	}

enter:
	/* got it */
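	/*
	 * enter the mappings directly at the pmap level.  pmap_kenter_pa()
	 * creates wired, unmanaged kernel mappings, so the pages stay
	 * mapped for the duration of the i/o regardless of paging activity.
	 */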
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the pmap-level mappings by hand and then remove the map
 * entry (waking up anyone waiting for space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, npages << PAGE_SHIFT);
	if (kva == emergva) {
		mutex_enter(&pager_map_wanted_lock);
		emerginuse = false;
		wakeup(&emergva);
		mutex_exit(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	pmap_update(pmap_kernel());
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */
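
/*
 * a nested ("child") buf is typically set up by the caller along these
 * lines (a sketch; getiobuf()'s signature and the surrounding setup vary
 * by caller and NetBSD version):
 *
 *	bp = getiobuf();
 *	bp->b_private = mbp;		(point back at the master buf)
 *	bp->b_iodone = uvm_aio_biodone1;
 *
 * with mbp->b_resid initialized to the total byte count.  each child's
 * completion subtracts its b_bcount from the master's b_resid; the
 * master's biodone() runs once the last child finishes.
 */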

void
uvm_aio_biodone1(struct buf *bp)
{
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);
	if (bp->b_error != 0)
		mbp->b_error = bp->b_error;
	mbp->b_resid -= bp->b_bcount;
	putiobuf(bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */
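
/*
 * the real completion work (unmapping, unbusying and freeing pages,
 * swap accounting) cannot be done at interrupt level, so this handler
 * only hands the buf off to the aiodone workqueue; the worker thread
 * then runs b_iodone (reset below to uvm_aio_aiodone) in thread context.
 */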

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	workqueue_enqueue(uvm.aiodone_queue, &bp->b_work, NULL);
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */
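
/*
 * outline (matching the code below):
 *
 *	1. recover the vm_page pointers from the buf's kernel mapping
 *	   and tear the pager_map mapping down;
 *	2. decide whether this was swap-backed i/o (anon or aobj pages)
 *	   or ordinary object i/o, and take the corresponding locks;
 *	3. per page: handle errors, clear PG_FAKE on successful reads,
 *	   and do pagedaemon accounting for PG_PAGEOUT pages;
 *	4. unbusy or release the pages and, for swap-backed writes,
 *	   update the swap usage counters and free or mark bad the
 *	   swap slots on error.
 */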

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];
	struct uvm_object *uobj;
	struct simplelock *slock;
	int s, i, error, swslot;
	bool write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = bp->b_error;
	write = (bp->b_flags & B_READ) == 0;
	/* XXXUBC B_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_flags & B_NOCACHE) && bioopsp) {
		bioopsp->io_pageiodone(bp);
	}

	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	swslot = 0;
	slock = NULL;
	pg = pgs[0];
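	/*
	 * swap-backed pages are those owned by an anon (no uobject) or by
	 * an aobj (PQ_AOBJ).  all pages in a single i/o have the same kind
	 * of backing, so the first page can decide for the whole buf.
	 */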
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
	    (pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = &uobj->vmobjlock;
		simple_lock(slock);
		uvm_lock_pageq();
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			simple_lock(slock);
			uvm_lock_pageq();
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					uvmexp.paging--;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;
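			/*
			 * slot == 0 detaches the page from its swap slot
			 * (the redirtied page will be paged out again);
			 * SWSLOT_BAD records that the backing copy is bad,
			 * so a later pagein fails instead of reading
			 * garbage.
			 */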

#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
					    pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
					    swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->pqflags |= PQ_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT((pg->flags & PG_CLEAN) != 0);
			uvm_pageenqueue(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			uvmexp.paging--;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
			    (pg->flags & PG_RELEASED) != 0) {
				uvm_unlock_pageq();
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				uvm_unlock_pageq();
				simple_unlock(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		uvm_unlock_pageq();
		simple_unlock(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		mutex_exit(&uvm_swap_data_lock);
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
	s = splbio();
	if (write && (bp->b_flags & B_AGE) != 0) {
		vwakeup(bp);
	}
	putiobuf(bp);
	splx(s);
}

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */
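
/*
 * this works because uvm_pagermapin() entered the pages into the kernel
 * pmap: pmap_extract() recovers the physical address behind the KVA and
 * PHYS_TO_VM_PAGE() maps that back to the managed page's vm_page.
 */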

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}