/*	$NetBSD: uvm_pager.c,v 1.72 2005/11/29 15:45:28 yamt Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.72 2005/11/29 15:45:28 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#define UVM_PAGER_C
#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

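/*
 * aobj_pager handles anonymous uvm objects, uvm_deviceops handles
 * device mappings, uvm_vnodeops handles vnode (file) pages, and
 * ubc_pager backs the ubc mapping layer.
 */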
struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
struct simplelock pager_map_wanted_lock;
boolean_t pager_map_wanted;		/* locked by pager map */
static vaddr_t emergva;
static boolean_t emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, PAGER_MAP_SIZE, 0,
	    FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;
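	/*
	 * reserve kva for an emergency mapping area; the pagedaemon
	 * falls back on it when pager_map is full, so that pageouts
	 * (and hence memory reclamation) can always make progress.
	 */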
	emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	    UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != 0) {
		if (curproc == uvm.pagedaemon_proc) {
			simple_lock(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, FALSE,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = TRUE;
			simple_unlock(&pager_map_wanted_lock);
			kva = emergva;
			/* The shift implicitly truncates to PAGE_SIZE */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, " SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}
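
/*
 * an illustrative (hypothetical) caller, e.g. a pager's put routine
 * with npages busy pages collected in pgs[]:
 *
 *	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WAITOK);
 *	... point a buf's b_data at kva and start the i/o ...
 *	uvm_pagermapout(kva, npages);
 */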

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the pmap-level mappings by hand and then remove the map
 * entry (waking up anyone waiting for space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, npages << PAGE_SHIFT);
	if (kva == emergva) {
		simple_lock(&pager_map_wanted_lock);
		emerginuse = FALSE;
		wakeup(&emergva);
		simple_unlock(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	pmap_update(pmap_kernel());
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone1(struct buf *bp)
{
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);
	if (bp->b_flags & B_ERROR) {
		mbp->b_flags |= B_ERROR;
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	pool_put(&bufpool, bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}
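
/*
 * a nested i/o splits one logical transfer (the "master" buf) into
 * several child bufs, each carrying b_private = master and b_iodone =
 * uvm_aio_biodone1.  an illustrative (hypothetical) setup for one
 * child buf nbp of master mbp:
 *
 *	nbp->b_private = mbp;
 *	nbp->b_iodone = uvm_aio_biodone1;
 *
 * the master's b_resid starts at the full transfer size and reaches
 * zero once every child has completed, at which point biodone() fires
 * on the master.
 */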

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	simple_unlock(&uvm.aiodoned_lock);
}
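
/*
 * the aiodoned kernel thread later dequeues bufs from uvm.aio_done
 * (in thread context) and calls their b_iodone hook, normally
 * uvm_aio_aiodone() below.
 */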

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];
	struct uvm_object *uobj;
	struct simplelock *slock;
	int s, i, error, swslot;
	boolean_t write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = (bp->b_flags & B_ERROR) ? (bp->b_error ? bp->b_error : EIO) : 0;
	write = (bp->b_flags & B_READ) == 0;
	/* XXXUBC B_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
		(*bioops.io_pageiodone)(bp);
	}

	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	swslot = 0;
	slock = NULL;
	pg = pgs[0];
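	/*
	 * swap i/o is identified from the first page: an anon page with
	 * no backing uvm_object, or an aobj page (PQ_AOBJ), can only
	 * have been transferred to or from swap.  either all pages in
	 * the i/o are swap-backed or none are.
	 */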
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = &uobj->vmobjlock;
		simple_lock(slock);
		uvm_lock_pageq();
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			simple_lock(slock);
			uvm_lock_pageq();
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					uvmexp.paging--;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

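			/*
			 * record the new slot state below: slot == 0 means
			 * "no swap slot" (the page was reactivated and will
			 * be retried), while SWSLOT_BAD marks the slot bad
			 * so a later pagein fails rather than returning
			 * stale data.
			 */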
#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
					    pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
					    swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->flags |= PG_SPECULATIVE;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			uvm_pageactivate(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			uvmexp.paging--;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
			    (pg->flags & PG_RELEASED) != 0) {
				uvm_unlock_pageq();
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				uvm_unlock_pageq();
				simple_unlock(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		uvm_unlock_pageq();
		simple_unlock(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		simple_unlock(&uvm.swap_data_lock);
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
	s = splbio();
	if (write && (bp->b_flags & B_AGE) != 0) {
		vwakeup(bp);
	}
	pool_put(&bufpool, bp);
	splx(s);
}