uvm_pager.c revision 1.58

1 1.58 chs /* $NetBSD: uvm_pager.c,v 1.58 2002/10/01 07:52:30 chs Exp $ */
2 1.1 mrg
3 1.1 mrg /*
4 1.1 mrg *
5 1.1 mrg * Copyright (c) 1997 Charles D. Cranor and Washington University.
6 1.1 mrg * All rights reserved.
7 1.1 mrg *
8 1.1 mrg * Redistribution and use in source and binary forms, with or without
9 1.1 mrg * modification, are permitted provided that the following conditions
10 1.1 mrg * are met:
11 1.1 mrg * 1. Redistributions of source code must retain the above copyright
12 1.1 mrg * notice, this list of conditions and the following disclaimer.
13 1.1 mrg * 2. Redistributions in binary form must reproduce the above copyright
14 1.1 mrg * notice, this list of conditions and the following disclaimer in the
15 1.1 mrg * documentation and/or other materials provided with the distribution.
16 1.1 mrg * 3. All advertising materials mentioning features or use of this software
17 1.1 mrg * must display the following acknowledgement:
18 1.1 mrg * This product includes software developed by Charles D. Cranor and
19 1.1 mrg * Washington University.
20 1.1 mrg * 4. The name of the author may not be used to endorse or promote products
21 1.1 mrg * derived from this software without specific prior written permission.
22 1.1 mrg *
23 1.1 mrg * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 1.1 mrg * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 1.1 mrg * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 1.1 mrg * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 1.1 mrg * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 1.1 mrg * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 1.1 mrg * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 1.1 mrg * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 1.1 mrg * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 1.1 mrg * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 1.3 mrg *
34 1.3 mrg * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
35 1.1 mrg */
36 1.1 mrg
37 1.1 mrg /*
38 1.1 mrg * uvm_pager.c: generic functions used to assist the pagers.
39 1.1 mrg */
40 1.54 lukem
41 1.54 lukem #include <sys/cdefs.h>
42 1.58 chs __KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.58 2002/10/01 07:52:30 chs Exp $");
43 1.54 lukem
44 1.54 lukem #include "opt_uvmhist.h"
45 1.1 mrg
46 1.1 mrg #include <sys/param.h>
47 1.1 mrg #include <sys/systm.h>
48 1.1 mrg #include <sys/proc.h>
49 1.1 mrg #include <sys/malloc.h>
50 1.35 chs #include <sys/pool.h>
51 1.35 chs #include <sys/vnode.h>
52 1.1 mrg
53 1.1 mrg #define UVM_PAGER
54 1.1 mrg #include <uvm/uvm.h>
55 1.1 mrg
56 1.35 chs struct pool *uvm_aiobuf_pool;
57 1.35 chs
58 1.1 mrg /*
59 1.1 mrg * list of uvm pagers in the system
60 1.1 mrg */
61 1.1 mrg
62 1.57 matt struct uvm_pagerops * const uvmpagerops[] = {
63 1.10 thorpej &aobj_pager,
64 1.6 mrg &uvm_deviceops,
65 1.6 mrg &uvm_vnodeops,
66 1.35 chs &ubc_pager,
67 1.1 mrg };
68 1.1 mrg
69 1.1 mrg /*
70 1.1 mrg * the pager map: provides KVA for I/O
71 1.1 mrg */
72 1.1 mrg
73 1.47 chs struct vm_map *pager_map; /* XXX */
74 1.46 chs struct simplelock pager_map_wanted_lock;
75 1.1 mrg boolean_t pager_map_wanted; /* locked by pager map */
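/*
 * emergva/emerginuse: a single MAXBSIZE-sized piece of KVA reserved at
 * init time for the pagedaemon to fall back on when pager_map is full
 * (see uvm_pagermapin), so a pageout can still be mapped and completed
 * while pager_map is exhausted.
 */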
76 1.35 chs static vaddr_t emergva;
77 1.35 chs static boolean_t emerginuse;
78 1.1 mrg
79 1.1 mrg /*
80 1.1 mrg * uvm_pager_init: init pagers (at boot time)
81 1.1 mrg */
82 1.1 mrg
83 1.6 mrg void
84 1.6 mrg uvm_pager_init()
85 1.6 mrg {
86 1.6 mrg int lcv;
87 1.50 chs vaddr_t sva, eva;
88 1.1 mrg
89 1.6 mrg /*
90 1.6 mrg * init pager map
91 1.6 mrg */
92 1.6 mrg
93 1.50 chs sva = 0;
94 1.50 chs pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, PAGER_MAP_SIZE, 0,
95 1.50 chs FALSE, NULL);
96 1.35 chs simple_lock_init(&pager_map_wanted_lock);
97 1.35 chs pager_map_wanted = FALSE;
98 1.35 chs emergva = uvm_km_valloc(kernel_map, MAXBSIZE);
99 1.35 chs emerginuse = FALSE;
100 1.6 mrg
101 1.6 mrg /*
102 1.6 mrg * init ASYNC I/O queue
103 1.6 mrg */
104 1.45 chs
105 1.6 mrg TAILQ_INIT(&uvm.aio_done);
106 1.1 mrg
107 1.6 mrg /*
108 1.6 mrg * call pager init functions
109 1.6 mrg */
110 1.6 mrg for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
111 1.6 mrg lcv++) {
112 1.6 mrg if (uvmpagerops[lcv]->pgo_init)
113 1.6 mrg uvmpagerops[lcv]->pgo_init();
114 1.6 mrg }
115 1.1 mrg }
116 1.1 mrg
117 1.1 mrg /*
118 1.1 mrg * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
119 1.1 mrg *
120 1.1 mrg * we basically just map in a blank map entry to reserve the space in the
121 1.1 mrg  * map and then use pmap_kenter_pa() to put the mappings in by hand.
122 1.1 mrg */
123 1.1 mrg
124 1.9 eeh vaddr_t
125 1.35 chs uvm_pagermapin(pps, npages, flags)
126 1.6 mrg struct vm_page **pps;
127 1.6 mrg int npages;
128 1.29 thorpej int flags;
129 1.1 mrg {
130 1.9 eeh vsize_t size;
131 1.9 eeh vaddr_t kva;
132 1.9 eeh vaddr_t cva;
133 1.6 mrg struct vm_page *pp;
134 1.29 thorpej vm_prot_t prot;
135 1.6 mrg UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);
136 1.1 mrg
137 1.35 chs UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);
138 1.29 thorpej
139 1.29 thorpej /*
140 1.29 thorpej * compute protection. outgoing I/O only needs read
141 1.29 thorpej * access to the page, whereas incoming needs read/write.
142 1.29 thorpej */
143 1.29 thorpej
144 1.29 thorpej prot = VM_PROT_READ;
145 1.29 thorpej if (flags & UVMPAGER_MAPIN_READ)
146 1.29 thorpej prot |= VM_PROT_WRITE;
147 1.1 mrg
148 1.1 mrg ReStart:
149 1.12 chs size = npages << PAGE_SHIFT;
150 1.29 thorpej kva = 0; /* let system choose VA */
151 1.1 mrg
152 1.45 chs if (uvm_map(pager_map, &kva, size, NULL,
153 1.43 chs UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != 0) {
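		/*
		 * pager_map has no free KVA.  the pagedaemon does not sleep
		 * waiting for pager_map space; it uses the reserved emergency
		 * mapping instead, sleeping only if that mapping is already
		 * in use.
		 */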
154 1.35 chs if (curproc == uvm.pagedaemon_proc) {
155 1.35 chs simple_lock(&pager_map_wanted_lock);
156 1.35 chs if (emerginuse) {
157 1.35 chs UVM_UNLOCK_AND_WAIT(&emergva,
158 1.35 chs &pager_map_wanted_lock, FALSE,
159 1.35 chs "emergva", 0);
160 1.35 chs goto ReStart;
161 1.35 chs }
162 1.35 chs emerginuse = TRUE;
163 1.35 chs simple_unlock(&pager_map_wanted_lock);
164 1.35 chs kva = emergva;
165 1.35 chs KASSERT(npages <= MAXBSIZE >> PAGE_SHIFT);
166 1.35 chs goto enter;
167 1.35 chs }
168 1.29 thorpej if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
169 1.6 mrg UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
170 1.29 thorpej return(0);
171 1.6 mrg }
172 1.6 mrg simple_lock(&pager_map_wanted_lock);
173 1.45 chs pager_map_wanted = TRUE;
174 1.6 mrg UVMHIST_LOG(maphist, " SLEEPING on pager_map",0,0,0,0);
175 1.45 chs UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
176 1.35 chs "pager_map", 0);
177 1.6 mrg goto ReStart;
178 1.6 mrg }
179 1.1 mrg
180 1.35 chs enter:
181 1.6 mrg /* got it */
182 1.6 mrg for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
183 1.6 mrg pp = *pps++;
184 1.40 mrg KASSERT(pp);
185 1.38 chs KASSERT(pp->flags & PG_BUSY);
186 1.50 chs pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
187 1.6 mrg }
188 1.49 chris pmap_update(vm_map_pmap(pager_map));
189 1.1 mrg
190 1.6 mrg UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
191 1.6 mrg return(kva);
192 1.1 mrg }
193 1.1 mrg
194 1.1 mrg /*
195 1.1 mrg * uvm_pagermapout: remove pager_map mapping
196 1.1 mrg *
197 1.1 mrg  * we remove the low-level mappings by hand and then remove the map entry
198 1.1 mrg  * (waking up anyone waiting for space).
199 1.1 mrg */
200 1.1 mrg
201 1.6 mrg void
202 1.6 mrg uvm_pagermapout(kva, npages)
203 1.9 eeh vaddr_t kva;
204 1.6 mrg int npages;
205 1.6 mrg {
206 1.12 chs vsize_t size = npages << PAGE_SHIFT;
207 1.47 chs struct vm_map_entry *entries;
208 1.6 mrg UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);
209 1.35 chs
210 1.6 mrg UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);
211 1.1 mrg
212 1.6 mrg /*
213 1.6 mrg * duplicate uvm_unmap, but add in pager_map_wanted handling.
214 1.6 mrg */
215 1.6 mrg
216 1.50 chs pmap_kremove(kva, npages << PAGE_SHIFT);
217 1.35 chs if (kva == emergva) {
218 1.35 chs simple_lock(&pager_map_wanted_lock);
219 1.35 chs emerginuse = FALSE;
220 1.35 chs wakeup(&emergva);
221 1.35 chs simple_unlock(&pager_map_wanted_lock);
222 1.50 chs return;
223 1.35 chs }
224 1.35 chs
225 1.6 mrg vm_map_lock(pager_map);
226 1.43 chs uvm_unmap_remove(pager_map, kva, kva + size, &entries);
227 1.6 mrg simple_lock(&pager_map_wanted_lock);
228 1.6 mrg if (pager_map_wanted) {
229 1.6 mrg pager_map_wanted = FALSE;
230 1.6 mrg wakeup(pager_map);
231 1.6 mrg }
232 1.6 mrg simple_unlock(&pager_map_wanted_lock);
233 1.6 mrg vm_map_unlock(pager_map);
234 1.6 mrg if (entries)
235 1.6 mrg uvm_unmap_detach(entries, 0);
236 1.49 chris pmap_update(pmap_kernel());
237 1.6 mrg UVMHIST_LOG(maphist,"<- done",0,0,0,0);
238 1.1 mrg }
239 1.1 mrg
240 1.1 mrg /*
241 1.35 chs * interrupt-context iodone handler for nested i/o bufs.
242 1.35 chs *
243 1.35 chs * => must be at splbio().
244 1.35 chs */
245 1.35 chs
246 1.35 chs void
247 1.35 chs uvm_aio_biodone1(bp)
248 1.35 chs struct buf *bp;
249 1.35 chs {
250 1.35 chs struct buf *mbp = bp->b_private;
251 1.35 chs
252 1.35 chs KASSERT(mbp != bp);
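	/*
	 * propagate any error to the master buf, subtract this nested
	 * buf's byte count from the master's residual and free the nested
	 * buf.  once the residual reaches zero, all nested i/os have
	 * completed and the master buf is finished off with biodone().
	 */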
253 1.35 chs if (bp->b_flags & B_ERROR) {
254 1.35 chs mbp->b_flags |= B_ERROR;
255 1.35 chs mbp->b_error = bp->b_error;
256 1.35 chs }
257 1.35 chs mbp->b_resid -= bp->b_bcount;
258 1.35 chs pool_put(&bufpool, bp);
259 1.35 chs if (mbp->b_resid == 0) {
260 1.35 chs biodone(mbp);
261 1.35 chs }
262 1.35 chs }
263 1.35 chs
264 1.35 chs /*
265 1.35 chs * interrupt-context iodone handler for single-buf i/os
266 1.35 chs * or the top-level buf of a nested-buf i/o.
267 1.35 chs *
268 1.35 chs * => must be at splbio().
269 1.35 chs */
270 1.35 chs
271 1.35 chs void
272 1.35 chs uvm_aio_biodone(bp)
273 1.35 chs struct buf *bp;
274 1.35 chs {
275 1.35 chs /* reset b_iodone for when this is a single-buf i/o. */
276 1.35 chs bp->b_iodone = uvm_aio_aiodone;
277 1.35 chs
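	/*
	 * queue the buf for the aiodone daemon and wake it up; the real
	 * completion work is done later, in thread context, by
	 * uvm_aio_aiodone().
	 */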
278 1.35 chs simple_lock(&uvm.aiodoned_lock); /* locks uvm.aio_done */
279 1.35 chs TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
280 1.35 chs wakeup(&uvm.aiodoned);
281 1.35 chs simple_unlock(&uvm.aiodoned_lock);
282 1.35 chs }
283 1.35 chs
284 1.35 chs /*
285 1.35 chs * uvm_aio_aiodone: do iodone processing for async i/os.
286 1.35 chs * this should be called in thread context, not interrupt context.
287 1.35 chs */
288 1.35 chs
289 1.35 chs void
290 1.35 chs uvm_aio_aiodone(bp)
291 1.35 chs struct buf *bp;
292 1.35 chs {
293 1.35 chs int npages = bp->b_bufsize >> PAGE_SHIFT;
294 1.35 chs struct vm_page *pg, *pgs[npages];
295 1.35 chs struct uvm_object *uobj;
296 1.50 chs struct simplelock *slock;
297 1.50 chs int s, i, error, swslot;
298 1.56 enami boolean_t write, swap;
299 1.35 chs UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
300 1.35 chs UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);
301 1.35 chs
302 1.41 chs error = (bp->b_flags & B_ERROR) ? (bp->b_error ? bp->b_error : EIO) : 0;
303 1.35 chs write = (bp->b_flags & B_READ) == 0;
304 1.35 chs /* XXXUBC B_NOCACHE is for swap pager, should be done differently */
305 1.36 chs if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
306 1.36 chs (*bioops.io_pageiodone)(bp);
307 1.35 chs }
308 1.35 chs
309 1.35 chs uobj = NULL;
310 1.35 chs for (i = 0; i < npages; i++) {
311 1.35 chs pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
312 1.35 chs UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
313 1.35 chs }
314 1.35 chs uvm_pagermapout((vaddr_t)bp->b_data, npages);
315 1.50 chs
316 1.50 chs swslot = 0;
317 1.50 chs slock = NULL;
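	/*
	 * a swap i/o is recognized from its first page: swap-backed pages
	 * either belong to an anon and no uvm_object, or to an aobj
	 * (PQ_AOBJ).  for a failed swap i/o, also find the swap slot so it
	 * can be marked bad below.
	 */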
318 1.55 chs pg = pgs[0];
319 1.55 chs swap = (pg->uanon != NULL && pg->uobject == NULL) ||
320 1.55 chs (pg->pqflags & PQ_AOBJ) != 0;
321 1.50 chs if (!swap) {
322 1.55 chs uobj = pg->uobject;
323 1.50 chs slock = &uobj->vmobjlock;
324 1.50 chs simple_lock(slock);
325 1.50 chs uvm_lock_pageq();
326 1.50 chs } else if (error) {
327 1.55 chs if (pg->uobject != NULL) {
328 1.58 chs swslot = uao_find_swslot(pg->uobject,
329 1.58 chs pg->offset >> PAGE_SHIFT);
330 1.55 chs } else {
331 1.50 chs swslot = pg->uanon->an_swslot;
332 1.50 chs }
333 1.50 chs KASSERT(swslot);
334 1.50 chs }
335 1.35 chs for (i = 0; i < npages; i++) {
336 1.35 chs pg = pgs[i];
337 1.50 chs KASSERT(swap || pg->uobject == uobj);
338 1.50 chs UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);
339 1.50 chs
340 1.50 chs /*
341 1.50 chs * for swap i/os, lock each page's object (or anon)
342 1.50 chs * individually since each page may need a different lock.
343 1.50 chs */
344 1.35 chs
345 1.35 chs if (swap) {
346 1.55 chs if (pg->uobject != NULL) {
347 1.55 chs slock = &pg->uobject->vmobjlock;
348 1.55 chs } else {
349 1.50 chs slock = &pg->uanon->an_lock;
350 1.35 chs }
351 1.50 chs simple_lock(slock);
352 1.50 chs uvm_lock_pageq();
353 1.50 chs }
354 1.50 chs
355 1.50 chs /*
356 1.50 chs * process errors. for reads, just mark the page to be freed.
357 1.50 chs * for writes, if the error was ENOMEM, we assume this was
358 1.50 chs * a transient failure so we mark the page dirty so that
359 1.50 chs * we'll try to write it again later. for all other write
360 1.50 chs * errors, we assume the error is permanent, thus the data
361 1.50 chs * in the page is lost. bummer.
362 1.50 chs */
363 1.50 chs
364 1.50 chs if (error) {
365 1.50 chs if (!write) {
366 1.50 chs pg->flags |= PG_RELEASED;
367 1.50 chs continue;
368 1.50 chs } else if (error == ENOMEM) {
369 1.50 chs if (pg->flags & PG_PAGEOUT) {
370 1.50 chs pg->flags &= ~PG_PAGEOUT;
371 1.50 chs uvmexp.paging--;
372 1.50 chs }
373 1.50 chs pg->flags &= ~PG_CLEAN;
374 1.50 chs uvm_pageactivate(pg);
375 1.50 chs }
376 1.50 chs }
377 1.50 chs
378 1.50 chs /*
379 1.50 chs * if the page is PG_FAKE, this must have been a read to
380 1.50 chs * initialize the page. clear PG_FAKE and activate the page.
381 1.53 chs * we must also clear the pmap "modified" flag since it may
382 1.53 chs * still be set from the page's previous identity.
383 1.50 chs */
384 1.50 chs
385 1.50 chs if (pg->flags & PG_FAKE) {
386 1.50 chs KASSERT(!write);
387 1.50 chs pg->flags &= ~PG_FAKE;
388 1.50 chs uvm_pageactivate(pg);
389 1.50 chs pmap_clear_modify(pg);
390 1.35 chs }
391 1.35 chs
392 1.35 chs /*
393 1.53 chs * do accounting for pagedaemon i/o and arrange to free
394 1.53 chs * the pages instead of just unbusying them.
395 1.35 chs */
396 1.35 chs
397 1.53 chs if (pg->flags & PG_PAGEOUT) {
398 1.50 chs pg->flags &= ~PG_PAGEOUT;
399 1.50 chs uvmexp.paging--;
400 1.35 chs pg->flags |= PG_RELEASED;
401 1.35 chs }
402 1.35 chs
403 1.35 chs /*
404 1.50 chs * for swap pages, unlock everything for this page now.
405 1.35 chs */
406 1.35 chs
407 1.35 chs if (swap) {
408 1.50 chs uvm_page_unbusy(&pg, 1);
409 1.50 chs uvm_unlock_pageq();
410 1.50 chs simple_unlock(slock);
411 1.35 chs }
412 1.35 chs }
413 1.35 chs if (!swap) {
414 1.50 chs uvm_page_unbusy(pgs, npages);
415 1.50 chs uvm_unlock_pageq();
416 1.50 chs simple_unlock(slock);
417 1.50 chs } else {
418 1.53 chs KASSERT(write);
419 1.53 chs
420 1.53 chs /* these pages are now only in swap. */
421 1.53 chs simple_lock(&uvm.swap_data_lock);
422 1.53 chs KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
423 1.53 chs uvmexp.swpgonly += npages;
424 1.53 chs simple_unlock(&uvm.swap_data_lock);
425 1.50 chs if (error) {
426 1.50 chs uvm_swap_markbad(swslot, npages);
427 1.50 chs }
428 1.35 chs }
429 1.35 chs s = splbio();
430 1.35 chs if (write && (bp->b_flags & B_AGE) != 0) {
431 1.35 chs vwakeup(bp);
432 1.35 chs }
433 1.35 chs pool_put(&bufpool, bp);
434 1.35 chs splx(s);
435 1.1 mrg }