/*	$NetBSD: uvm_pager.c,v 1.80 2007/02/21 23:00:14 thorpej Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.80 2007/02/21 23:00:14 thorpej Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
struct simplelock pager_map_wanted_lock;
bool pager_map_wanted;			/* locked by pager map */
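/*
 * emergency KVA for the pagedaemon: a single MAXPHYS-sized mapping
 * reserved at boot so the pagedaemon can always make progress even
 * when pager_map is exhausted (see uvm_pagermapin()).
 */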
static vaddr_t emergva;
static bool emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, PAGER_MAP_SIZE, 0,
	    FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;
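	/* reserve VA only (UVM_KMF_VAONLY, no backing pages) for the
	   emergency mapping */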
	emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = curproc == uvm.pagedaemon_proc;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_FLAG_NOMERGE | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
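		/*
		 * no space in pager_map.  the pagedaemon must never sleep
		 * waiting for it, since it may be the thread that has to
		 * finish the i/o holding that space; fall back to the
		 * emergency mapping reserved at boot.
		 */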
		if (pdaemon) {
			simple_lock(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, FALSE,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = TRUE;
			simple_unlock(&pager_map_wanted_lock);
			kva = emergva;
			/* The shift implicitly truncates to PAGE_SIZE */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, " SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got the kva: enter the mappings by hand with pmap_kenter_pa() */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the pmap-level mappings by hand and then remove the map
 * entry (waking up anyone waiting for space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, npages << PAGE_SHIFT);
	if (kva == emergva) {
		simple_lock(&pager_map_wanted_lock);
		emerginuse = FALSE;
		wakeup(&emergva);
		simple_unlock(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	pmap_update(pmap_kernel());
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone1(struct buf *bp)
{
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);
	if (bp->b_flags & B_ERROR) {
		mbp->b_flags |= B_ERROR;
		mbp->b_error = bp->b_error;
	}
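	/*
	 * the master buf's b_resid tracks the bytes still outstanding;
	 * once the last nested buf finishes, complete the master buf.
	 */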
	mbp->b_resid -= bp->b_bcount;
	putiobuf(bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

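	/*
	 * the real completion work may sleep (it takes locks and tears
	 * down pager_map mappings), so defer it to the aiodone workqueue
	 * thread rather than doing it in interrupt context.
	 */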
	workqueue_enqueue(uvm.aiodone_queue, &bp->b_work);
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];
	struct uvm_object *uobj;
	struct simplelock *slock;
	int s, i, error, swslot;
	bool write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = (bp->b_flags & B_ERROR) ? (bp->b_error ? bp->b_error : EIO) : 0;
	write = (bp->b_flags & B_READ) == 0;
	/* XXXUBC B_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
		(*bioops.io_pageiodone)(bp);
	}

	uobj = NULL;
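	/*
	 * recover the vm_page pointers from the buf's pager_map KVA,
	 * then release the mapping.
	 */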
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	swslot = 0;
	slock = NULL;
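	/*
	 * the i/o was to swap if the pages belong to an anon (and no
	 * object) or to an aobj; such pages may each need a different lock.
	 */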
	pg = pgs[0];
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = &uobj->vmobjlock;
		simple_lock(slock);
		uvm_lock_pageq();
	} else {
#if defined(VMSWAP)
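		/*
		 * on error, look up the pages' swap slot now so it can
		 * be freed or marked bad once all pages are processed.
		 */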
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			simple_lock(slock);
			uvm_lock_pageq();
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					uvmexp.paging--;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
					    pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
					    swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->pqflags |= PQ_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT((pg->flags & PG_CLEAN) != 0);
			uvm_pageenqueue(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			uvmexp.paging--;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
			    (pg->flags & PG_RELEASED) != 0) {
				uvm_unlock_pageq();
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				uvm_unlock_pageq();
				simple_unlock(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		uvm_unlock_pageq();
		simple_unlock(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		simple_unlock(&uvm.swap_data_lock);
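		/*
		 * on a transient (ENOMEM) failure the pages stay in core
		 * and their slots are freed; on a permanent error the
		 * slots are marked bad so the lost data is never read back.
		 */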
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
	s = splbio();
	if (write && (bp->b_flags & B_AGE) != 0) {
		vwakeup(bp);
	}
	putiobuf(bp);
	splx(s);
}

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}