/*	$NetBSD: uvm_pager.c,v 1.85 2007/07/29 13:31:17 ad Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.85 2007/07/29 13:31:17 ad Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

struct uvm_pagerops * const uvmpagerops[] = {
        &aobj_pager,
        &uvm_deviceops,
        &uvm_vnodeops,
        &ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock;
bool pager_map_wanted;	/* locked by pager map */
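
/*
 * emergency KVA reserved for the pagedaemon: if pager_map fills up, the
 * pagedaemon must still be able to map pages for pageout (it may be the
 * only thread that can free memory), so uvm_pagermapin() falls back to
 * this single MAXPHYS-sized window when called from the pagedaemon.
 */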
static vaddr_t emergva;
static bool emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
        u_int lcv;
        vaddr_t sva, eva;

        /*
         * init pager map
         */

        sva = 0;
        pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, PAGER_MAP_SIZE, 0,
            false, NULL);
        mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
        pager_map_wanted = false;
        emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
            UVM_KMF_VAONLY);
#if defined(DEBUG)
        if (emergva == 0)
                panic("emergva");
#endif
        emerginuse = false;

        /*
         * init ASYNC I/O queue
         */

        TAILQ_INIT(&uvm.aio_done);

        /*
         * call pager init functions
         */
        for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
            lcv++) {
                if (uvmpagerops[lcv]->pgo_init)
                        uvmpagerops[lcv]->pgo_init();
        }
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
        vsize_t size;
        vaddr_t kva;
        vaddr_t cva;
        struct vm_page *pp;
        vm_prot_t prot;
        const bool pdaemon = curlwp == uvm.pagedaemon_lwp;
        UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

        /*
         * compute protection.  outgoing I/O only needs read
         * access to the page, whereas incoming needs read/write.
         */

        prot = VM_PROT_READ;
        if (flags & UVMPAGER_MAPIN_READ)
                prot |= VM_PROT_WRITE;

ReStart:
        size = npages << PAGE_SHIFT;
        kva = 0;			/* let system choose VA */

        if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET, 0,
            UVM_FLAG_NOMERGE | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
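                /*
                 * no KVA available in pager_map.  the pagedaemon must
                 * not sleep waiting for space (it may be the only thread
                 * that can create any), so it falls back to the reserved
                 * emergency VA; everyone else either fails immediately
                 * (no UVMPAGER_MAPIN_WAITOK) or sleeps until a mapping
                 * is removed.
                 */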
                if (pdaemon) {
                        mutex_enter(&pager_map_wanted_lock);
                        if (emerginuse) {
                                mtsleep(&emergva, PVM | PNORELOCK, "emergva",
                                    0, &pager_map_wanted_lock);
                                goto ReStart;
                        }
                        emerginuse = true;
                        mutex_exit(&pager_map_wanted_lock);
                        kva = emergva;
                        /* the shift implicitly rounds MAXPHYS down to whole pages */
                        KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
                        goto enter;
                }
                if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
                        UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
                        return(0);
                }
                mutex_enter(&pager_map_wanted_lock);
                pager_map_wanted = true;
                UVMHIST_LOG(maphist, " SLEEPING on pager_map",0,0,0,0);
                mtsleep(pager_map, PVM | PNORELOCK, "pager_map", 0,
                    &pager_map_wanted_lock);
                goto ReStart;
        }

enter:
        /* got it */
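        /*
         * enter each page into the kernel pmap at consecutive VAs.
         * pmap_kenter_pa() creates unmanaged mappings, so they must be
         * torn down with pmap_kremove() in uvm_pagermapout().
         */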
        for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
                pp = *pps++;
                KASSERT(pp);
                KASSERT(pp->flags & PG_BUSY);
                pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
        }
        pmap_update(vm_map_pmap(pager_map));

        UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
        return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our pmap-level mappings by hand and then remove the map
 * entry (waking up anyone wanting space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
        vsize_t size = npages << PAGE_SHIFT;
        struct vm_map_entry *entries;
        UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

        /*
         * duplicate uvm_unmap, but add in pager_map_wanted handling.
         */

        pmap_kremove(kva, size);
        if (kva == emergva) {
                mutex_enter(&pager_map_wanted_lock);
                emerginuse = false;
                wakeup(&emergva);
                mutex_exit(&pager_map_wanted_lock);
                return;
        }

        vm_map_lock(pager_map);
        uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
        mutex_enter(&pager_map_wanted_lock);
        if (pager_map_wanted) {
                pager_map_wanted = false;
                wakeup(pager_map);
        }
        mutex_exit(&pager_map_wanted_lock);
        vm_map_unlock(pager_map);
        if (entries)
                uvm_unmap_detach(entries, 0);
        pmap_update(pmap_kernel());
        UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
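
/*
 * illustrative (non-compiled) sketch of the usual pairing, assuming a
 * pager doing synchronous I/O on a cluster of busy pages:
 *
 *	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WAITOK);
 *	... point a buf's b_data at kva and perform the I/O ...
 *	uvm_pagermapout(kva, npages);
 *
 * for async I/O the mapout instead happens later, from the aiodone
 * path (see uvm_aio_aiodone() below).
 */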

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone1(struct buf *bp)
{
        struct buf *mbp = bp->b_private;

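        /*
         * propagate any error to the master buffer, credit this child's
         * byte count against it, and complete the master once the last
         * child (b_resid reaches 0) has finished.
         */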
        KASSERT(mbp != bp);
        if (bp->b_error != 0)
                mbp->b_error = bp->b_error;
        mbp->b_resid -= bp->b_bcount;
        putiobuf(bp);
        if (mbp->b_resid == 0) {
                biodone(mbp);
        }
}

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(struct buf *bp)
{
        /* reset b_iodone for when this is a single-buf i/o. */
        bp->b_iodone = uvm_aio_aiodone;

        workqueue_enqueue(uvm.aiodone_queue, &bp->b_work, NULL);
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
        int npages = bp->b_bufsize >> PAGE_SHIFT;
        struct vm_page *pg, *pgs[npages];
        struct uvm_object *uobj;
        struct simplelock *slock;
        int s, i, error, swslot;
        bool write, swap;
        UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
        UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

        error = bp->b_error;
        write = (bp->b_flags & B_READ) == 0;
        /* XXXUBC B_NOCACHE is for swap pager, should be done differently */
        if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
                (*bioops.io_pageiodone)(bp);
        }

        uobj = NULL;
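        /*
         * recover the vm_page pointers for this i/o: uvm_pagermapin()
         * mapped the pages contiguously at bp->b_data, so walk the KVA
         * a page at a time, convert each address back with
         * uvm_pageratop(), and then drop the pager_map mapping.
         */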
        for (i = 0; i < npages; i++) {
                pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
                UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
        }
        uvm_pagermapout((vaddr_t)bp->b_data, npages);

        swslot = 0;
        slock = NULL;
        pg = pgs[0];
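        /*
         * a page is swap-backed if it is anon-owned with no object, or
         * if it belongs to an aobj (PQ_AOBJ); such pages were paged
         * to/from swap rather than through a vnode pager.
         */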
        swap = (pg->uanon != NULL && pg->uobject == NULL) ||
            (pg->pqflags & PQ_AOBJ) != 0;
        if (!swap) {
                uobj = pg->uobject;
                slock = &uobj->vmobjlock;
                simple_lock(slock);
                uvm_lock_pageq();
        } else {
#if defined(VMSWAP)
                if (error) {
                        if (pg->uobject != NULL) {
                                swslot = uao_find_swslot(pg->uobject,
                                    pg->offset >> PAGE_SHIFT);
                        } else {
                                KASSERT(pg->uanon != NULL);
                                swslot = pg->uanon->an_swslot;
                        }
                        KASSERT(swslot);
                }
#else /* defined(VMSWAP) */
                panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
        }
        for (i = 0; i < npages; i++) {
                pg = pgs[i];
                KASSERT(swap || pg->uobject == uobj);
                UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
                /*
                 * for swap i/os, lock each page's object (or anon)
                 * individually since each page may need a different lock.
                 */

                if (swap) {
                        if (pg->uobject != NULL) {
                                slock = &pg->uobject->vmobjlock;
                        } else {
                                slock = &pg->uanon->an_lock;
                        }
                        simple_lock(slock);
                        uvm_lock_pageq();
                }
#endif /* defined(VMSWAP) */

                /*
                 * process errors.  for reads, just mark the page to be freed.
                 * for writes, if the error was ENOMEM, we assume this was
                 * a transient failure so we mark the page dirty so that
                 * we'll try to write it again later.  for all other write
                 * errors, we assume the error is permanent, thus the data
                 * in the page is lost.  bummer.
                 */

                if (error) {
                        int slot;
                        if (!write) {
                                pg->flags |= PG_RELEASED;
                                continue;
                        } else if (error == ENOMEM) {
                                if (pg->flags & PG_PAGEOUT) {
                                        pg->flags &= ~PG_PAGEOUT;
                                        uvmexp.paging--;
                                }
                                pg->flags &= ~PG_CLEAN;
                                uvm_pageactivate(pg);
                                slot = 0;
                        } else
                                slot = SWSLOT_BAD;

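                        /*
                         * record the outcome in the page's swap slot:
                         * 0 means the page is no longer in swap (it was
                         * re-dirtied and will be retried from memory),
                         * while SWSLOT_BAD marks the slot contents as
                         * unusable.
                         */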
#if defined(VMSWAP)
                        if (swap) {
                                if (pg->uobject != NULL) {
                                        int oldslot;
                                        oldslot = uao_set_swslot(pg->uobject,
                                            pg->offset >> PAGE_SHIFT, slot);
                                        KASSERT(oldslot == swslot + i);
                                } else {
                                        KASSERT(pg->uanon->an_swslot ==
                                            swslot + i);
                                        pg->uanon->an_swslot = slot;
                                }
                        }
#endif /* defined(VMSWAP) */
                }

                /*
                 * if the page is PG_FAKE, this must have been a read to
                 * initialize the page.  clear PG_FAKE and activate the page.
                 * we must also clear the pmap "modified" flag since it may
                 * still be set from the page's previous identity.
                 */

                if (pg->flags & PG_FAKE) {
                        KASSERT(!write);
                        pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
                        pg->pqflags |= PQ_READAHEAD;
                        uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
                        KASSERT((pg->flags & PG_CLEAN) != 0);
                        uvm_pageenqueue(pg);
                        pmap_clear_modify(pg);
                }

                /*
                 * do accounting for pagedaemon i/o and arrange to free
                 * the pages instead of just unbusying them.
                 */

                if (pg->flags & PG_PAGEOUT) {
                        pg->flags &= ~PG_PAGEOUT;
                        uvmexp.paging--;
                        uvmexp.pdfreed++;
                        pg->flags |= PG_RELEASED;
                }

#if defined(VMSWAP)
                /*
                 * for swap pages, unlock everything for this page now.
                 */

                if (swap) {
                        if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
                            (pg->flags & PG_RELEASED) != 0) {
                                uvm_unlock_pageq();
                                uvm_anon_release(pg->uanon);
                        } else {
                                uvm_page_unbusy(&pg, 1);
                                uvm_unlock_pageq();
                                simple_unlock(slock);
                        }
                }
#endif /* defined(VMSWAP) */
        }
        if (!swap) {
                uvm_page_unbusy(pgs, npages);
                uvm_unlock_pageq();
                simple_unlock(slock);
        } else {
#if defined(VMSWAP)
                KASSERT(write);

                /* these pages are now only in swap. */
                mutex_enter(&uvm_swap_data_lock);
                KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
                if (error != ENOMEM)
                        uvmexp.swpgonly += npages;
                mutex_exit(&uvm_swap_data_lock);
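
                /*
                 * on failure, either mark the swap slots bad (the data
                 * is lost) or, for the transient ENOMEM case, free them
                 * since the pages are still resident and dirty and will
                 * be written again later.
                 */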
                if (error) {
                        if (error != ENOMEM)
                                uvm_swap_markbad(swslot, npages);
                        else
                                uvm_swap_free(swslot, npages);
                }
                uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
        }
        s = splbio();
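        /*
         * for completed writes on aged bufs, vwakeup() credits the
         * write against the vnode's pending-output count and wakes
         * anyone waiting for output to drain.
         */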
        if (write && (bp->b_flags & B_AGE) != 0) {
                vwakeup(bp);
        }
        putiobuf(bp);
        splx(s);
}

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
        struct vm_page *pg;
        paddr_t pa;
        bool rv;

        rv = pmap_extract(pmap_kernel(), kva, &pa);
        KASSERT(rv);
        pg = PHYS_TO_VM_PAGE(pa);
        KASSERT(pg != NULL);
        return (pg);
}
