/*	$NetBSD: uvm_pager.c,v 1.98.4.1 2011/02/08 16:20:07 bouyer Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.98.4.1 2011/02/08 16:20:07 bouyer Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"
#include "opt_pagermap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/buf.h>

#include <uvm/uvm.h>

/*
 * XXX
 * this is needed until the device strategy interface
 * is changed to do physically-addressed i/o.
 */

#ifndef PAGER_MAP_DEFAULT_SIZE
#define PAGER_MAP_DEFAULT_SIZE	(16 * 1024 * 1024)
#endif

#ifndef PAGER_MAP_SIZE
#define PAGER_MAP_SIZE	PAGER_MAP_DEFAULT_SIZE
#endif

size_t pager_map_size = PAGER_MAP_SIZE;
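
/*
 * note: since opt_pagermap.h is included above, the default size can be
 * overridden from a kernel configuration file via the PAGER_MAP_SIZE
 * option, e.g. (value illustrative only):
 *
 *	options 	PAGER_MAP_SIZE="(32 * 1024 * 1024)"
 */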

/*
 * list of uvm pagers in the system
 */

const struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock;
bool pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static bool emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, pager_map_size, 0,
	    false, NULL);
	mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
	pager_map_wanted = false;
	emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = false;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < __arraycount(uvmpagerops); lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = curlwp == uvm.pagedaemon_lwp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_FLAG_NOMERGE | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
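		/*
		 * no space in pager_map.  the pagedaemon cannot afford
		 * to sleep here: pager_map space is released by the very
		 * pageout i/o it is trying to start, so waiting could
		 * deadlock.  it falls back to the dedicated emergency
		 * mapping (emergva) instead.
		 */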
		if (pdaemon) {
			mutex_enter(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, false,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = true;
			mutex_exit(&pager_map_wanted_lock);
			kva = emergva;
			/*
			 * the emergency area is MAXPHYS bytes,
			 * i.e. MAXPHYS >> PAGE_SHIFT pages.
			 */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		mutex_enter(&pager_map_wanted_lock);
		pager_map_wanted = true;
		UVMHIST_LOG(maphist, " SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, false,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot, 0);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}
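
/*
 * an illustrative sketch (not called from anywhere): a pager wanting a
 * temporary kernel mapping of `npages' busy pages for a pageout would
 * bracket its i/o with uvm_pagermapin() above and uvm_pagermapout()
 * below, along these lines:
 *
 *	kva = uvm_pagermapin(pgs, npages,
 *	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
 *	... do i/o on [kva, kva + (npages << PAGE_SHIFT)) ...
 *	uvm_pagermapout(kva, npages);
 */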

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the pmap-level mappings by hand and then remove the map
 * entry (waking up anyone waiting for space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, size);
	pmap_update(pmap_kernel());

	if (kva == emergva) {
		mutex_enter(&pager_map_wanted_lock);
		emerginuse = false;
		wakeup(&emergva);
		mutex_exit(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 */

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	workqueue_enqueue(uvm.aiodone_queue, &bp->b_work, NULL);
}

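/*
 * illustrative note: an async pager i/o typically reaches the handler
 * above by setting, before the i/o is issued:
 *
 *	bp->b_iodone = uvm_aio_biodone;
 *
 * biodone() then calls it in interrupt context, and it hands the buf
 * to the aiodone workqueue so that uvm_aio_aiodone() runs in thread
 * context.
 */

/*
 * uvm_aio_aiodone_pages: finish i/o on a set of pages: process any
 * error, clear PG_FAKE on freshly-read pages, do pagedaemon accounting
 * and unbusy (or free) the pages.  runs in thread context.
 */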
void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj;
	struct vm_page *pg;
	kmutex_t *slock;
	int pageout_done;
	int swslot;
	int i;
	bool swap;
	UVMHIST_FUNC("uvm_aio_aiodone_pages"); UVMHIST_CALLED(ubchist);

	swslot = 0;
	pageout_done = 0;
	slock = NULL;
	uobj = NULL;
	pg = pgs[0];
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
	    (pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = &uobj->vmobjlock;
		mutex_enter(slock);
		mutex_enter(&uvm_pageqlock);
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			mutex_enter(slock);
			mutex_enter(&uvm_pageqlock);
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					pageout_done++;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
					    pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
					    swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->pqflags |= PQ_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT((pg->flags & PG_CLEAN) != 0);
			uvm_pageenqueue(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			pageout_done++;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
			    (pg->flags & PG_RELEASED) != 0) {
				mutex_exit(&uvm_pageqlock);
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	uvm_pageout_done(pageout_done);
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		mutex_exit(&uvm_swap_data_lock);
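		/*
		 * on a permanent error, mark the swap slots bad so the
		 * (lost) data will not be paged back in from them; on a
		 * transient ENOMEM error the pages were re-dirtied above
		 * and remain in core, so the slots can simply be freed.
		 */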
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pgs[npages];
	int i, error;
	bool write;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = bp->b_error;
	write = (bp->b_flags & B_READ) == 0;

	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	uvm_aio_aiodone_pages(pgs, npages, write, error);

	if (write && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}