/*	$NetBSD: uvm_pager.c,v 1.45 2001/05/25 04:06:16 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_uvmhist.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#define UVM_PAGER
#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;
extern struct uvm_pagerops ubc_pager;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};
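
/*
 * note: aobj_pager needs no extern declaration here; it comes in via
 * the headers pulled in by <uvm/uvm.h>.
 */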

/*
 * the pager map: provides KVA for I/O
 */

vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static boolean_t emerginuse;
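
/*
 * emergva is a one-cluster (MAXBSIZE) mapping reserved for the
 * pagedaemon: if pager_map fills up, the pagedaemon can still map in
 * one pageout cluster and make progress, rather than sleeping for KVA
 * that only a completed pageout could release.  emerginuse serializes
 * use of the emergency mapping (see uvm_pagermapin()).
 */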

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, 0, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;
	emergva = uvm_km_valloc(kernel_map, MAXBSIZE);
	emerginuse = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}
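
/*
 * a pager opts into this boot-time pass by filling in the pgo_init
 * hook of its uvm_pagerops and appearing in uvmpagerops[] above.
 * a minimal sketch (the "foo" names are hypothetical; only the
 * pgo_init slot is shown, see uvm_pager.h for the full hook list):
 *
 *	static void foo_init(void);
 *
 *	struct uvm_pagerops foo_pagerops = {
 *		foo_init,
 *		... remaining hooks ...
 *	};
 */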

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(pps, npages, flags)
	struct vm_page **pps;
	int npages;
	int flags;
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 * note that UVMPAGER_MAPIN_READ names the direction of the
	 * transfer: a pagein writes into the pages, so its mapping
	 * must allow write access.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	    UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != 0) {
		if (curproc == uvm.pagedaemon_proc) {
			simple_lock(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, FALSE,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = TRUE;
			simple_unlock(&pager_map_wanted_lock);
			kva = emergva;
			KASSERT(npages <= MAXBSIZE >> PAGE_SHIFT);
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | ((pp->flags & PG_FAKE) ? prot :
		    VM_PROT_READ));
	}
	pmap_update();

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our pmap-level mappings by hand and then remove the map
 * entry itself (waking up anyone waiting for space).
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	if (kva == emergva) {
		simple_lock(&pager_map_wanted_lock);
		emerginuse = FALSE;
		wakeup(&emergva);
		simple_unlock(&pager_map_wanted_lock);
		entries = NULL;
		goto remove;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);

remove:
	pmap_remove(pmap_kernel(), kva, kva + (npages << PAGE_SHIFT));
	if (entries)
		uvm_unmap_detach(entries, 0);
	pmap_update();
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
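
/*
 * a hedged usage sketch (not a call site from this file): a pager
 * doing a pagein on a cluster of busy pages might bracket the
 * transfer with
 *
 *	kva = uvm_pagermapin(ppsp, npages,
 *	    UVMPAGER_MAPIN_WAITOK | UVMPAGER_MAPIN_READ);
 *	... perform the I/O on [kva, kva + (npages << PAGE_SHIFT)) ...
 *	uvm_pagermapout(kva, npages);
 *
 * with UVMPAGER_MAPIN_WAITOK the mapin sleeps until KVA is available;
 * without it, a return value of 0 means no KVA could be had.
 */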

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *	PGO_ALLPAGES:  all pages in object are valid targets
 *	!PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *	PGO_DOACTCLUST: include active pages in cluster.
 *	  NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *	  PG_CLEANCHK is only a hint, but clearing will help reduce
 *	  the number of calls we make to the pmap layer.
 */
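
/*
 * on success the return value points into the middle of the caller's
 * pps array: the center page is planted first and neighbors are added
 * on either side, so with a final *npages of 3 the layout is roughly
 *
 *	pps:   [ ... | left | center | right | ... ]
 *	               ^-- returned pointer, *npages == 3
 *
 * (illustration only; the exact position depends on how far backward
 * clustering got before it stopped.)
 */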

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;	/* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	voff_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	voff_t lo, hi, curoff;
	int center_idx, forward, incr;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 */

	for (forward = 0 ; forward <= 1 ; forward++) {
		incr = forward ? PAGE_SIZE : -PAGE_SIZE;
		curoff = center->offset + incr;
		for ( ;(forward == 0 && curoff >= lo) ||
		    (forward && curoff < hi);
		    curoff += incr) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL) {
				break;			/* no page */
			}

			if ((flags & PGO_DOACTCLUST) == 0) {
				/* don't want mapped pages at all */
				break;
			}

			/*
			 * get an up-to-date view of the "clean" bit.
			 * note this isn't 100% accurate, but it doesn't
			 * have to be.  if it's not quite right, the
			 * worst that happens is we don't cluster as
			 * aggressively.  we'll sync-it-for-sure before
			 * we free the page, and clean it if necessary.
			 */

			if ((pclust->flags & PG_CLEANCHK) == 0) {
				if ((pclust->flags & (PG_CLEAN|PG_BUSY))
				    == PG_CLEAN &&
				    pmap_is_modified(pclust))
					pclust->flags &= ~PG_CLEAN;

				/* now checked */
				pclust->flags |= PG_CLEANCHK;
			}

			/* is page available for cleaning and does it need it? */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0) {
				break;	/* page is already clean or is busy */
			}

			/* yes!  enroll the page in our array */
			pclust->flags |= PG_BUSY;		/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");

			/* XXX: protect wired page?  see above comment. */
			pmap_page_protect(pclust, VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			(*npages)++;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.   this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: wait for i/o to complete
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the error code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result == 0 AND async.   in all other cases
 *		we return with uobj unlocked.   [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.   thus, if we return
 *		an error, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */
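
/*
 * a hedged sketch of the two calling conventions (variable names are
 * illustrative, not lifted from a real call site):
 *
 *	object-backed, from within the object's pager:
 *		error = uvm_pager_put(uobj, pg, &ppsp, &npages,
 *		    PGO_DOACTCLUST, start, stop);
 *
 *	swap-backed, from the pagedaemon with a pre-built cluster:
 *		error = uvm_pager_put(NULL, pg, &ppsp, &npages,
 *		    PGO_ALLPAGES | PGO_PDFREECLUST, startslot, 0);
 */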

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	voff_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	struct vm_page **ppsp = *ppsp_ptr;
	UVMHIST_FUNC("uvm_pager_put"); UVMHIST_CALLED(ubchist);

	/*
	 * note that uobj is null if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;  /* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.   the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */

		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.   if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
		UVMHIST_LOG(ubchist, "put -> %d", result, 0,0,0);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 *	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 * i/o is done...]
	 */

	if (result == 0) {
		if (flags & PGO_PDFREECLUST && !async) {

			/*
			 * drop cluster and relock object for sync i/o.
			 */

			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST);

			/* if (uobj): object still locked, as per #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.
	 * for transient errors, drop to a cluster of 1 page ("pg")
	 * and try again.   for hard errors, don't bother retrying.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj) {
			simple_lock(&uobj->vmobjlock);
		}
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);

		/*
		 * for failed swap-backed pageouts with a "pg",
		 * we need to reset pg's swslot to either:
		 * "swblk" (for transient errors, so we can retry),
		 * or 0 (for hard errors).
		 */

		if (uobj == NULL && pg != NULL) {
			int nswblk = (result == EAGAIN) ? swblk : 0;
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = nswblk;
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT,
				    nswblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
		if (result == EAGAIN) {

			/*
			 * for transient failures, free all the swslots that
			 * we're not going to retry with.
			 */

			if (uobj == NULL) {
				if (pg) {
					uvm_swap_free(swblk + 1, *npages - 1);
				} else {
					uvm_swap_free(swblk, *npages);
				}
			}
			if (pg) {
				ppsp[0] = pg;
				*npages = 1;
				goto ReTry;
			}
		} else if (uobj == NULL) {

			/*
			 * for hard errors on swap-backed pageouts,
			 * mark the swslots as bad.   note that we do not
			 * free swslots that we mark bad.
			 */

			uvm_swap_markbad(swblk, *npages);
		}
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).  give up!  the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}
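
/*
 * the retry protocol above, worked through for a transient failure:
 * suppose a swap-backed cluster of 8 pages starting at slot "swblk"
 * fails with EAGAIN and "pg" is non-NULL.  the cluster is dropped
 * (PGO_REALLOCSWAP zeroes the other pages' swap slots), pg's swslot is
 * pointed back at swblk, the now-unused slots swblk+1 .. swblk+7 are
 * freed, and the put is retried with the one-page cluster { pg }.
 */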

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.   we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *	PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */

void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		/* skip "pg" or empty slot */
		if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */

		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED) {
			/* still holding obj lock */
			wakeup(ppsp[lcv]);
		}

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(ppsp[lcv], VM_PROT_NONE);
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			KASSERT(!uobj || obj_is_alive);

			/* only unlock the object if it is still alive...  */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?  we
			 * are currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED|PG_FAKE);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout, update the page!
		 */

		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(ppsp[lcv]);
			pmap_clear_modify(ppsp[lcv]);
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}
	}
}

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone1(bp)
	struct buf *bp;
{
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);
	if (bp->b_flags & B_ERROR) {
		mbp->b_flags |= B_ERROR;
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	pool_put(&bufpool, bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}
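
/*
 * a hedged sketch of the nested-buf convention this handler assumes:
 * the submitter sets the master buf's b_resid to the total byte count,
 * then for each sub-transfer allocates a child buf from bufpool with
 * b_private pointing at the master and b_iodone set to
 * uvm_aio_biodone1.  as each child completes, its byte count is
 * subtracted from the master's b_resid; the master's own iodone runs
 * only when the last child has checked in.
 */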

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(bp)
	struct buf *bp;
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	simple_unlock(&uvm.aiodoned_lock);
}
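
/*
 * note: the buf is not finished here; this just queues it for the
 * aiodone daemon thread, which sleeps on &uvm.aiodoned, pulls bufs off
 * uvm.aio_done, and calls their b_iodone (uvm_aio_aiodone, set above)
 * in thread context where it is safe to take sleep locks.
 */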

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(bp)
	struct buf *bp;
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];
	struct uvm_object *uobj;
	int s, i, error;
	boolean_t write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = (bp->b_flags & B_ERROR) ? (bp->b_error ? bp->b_error : EIO) : 0;
	write = (bp->b_flags & B_READ) == 0;
	/* XXXUBC B_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
		(*bioops.io_pageiodone)(bp);
	}

	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];

		if (i == 0) {
			swap = (pg->pqflags & PQ_SWAPBACKED) != 0;
			if (!swap) {
				uobj = pg->uobject;
				simple_lock(&uobj->vmobjlock);
			}
		}
		KASSERT(swap || pg->uobject == uobj);
		if (swap) {
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
			}
		}

		/*
		 * if this is a read and we got an error, mark the pages
		 * PG_RELEASED so that uvm_page_unbusy() will free them.
		 */

		if (!write && error) {
			pg->flags |= PG_RELEASED;
			continue;
		}
		KASSERT(!write || (pgs[i]->flags & PG_FAKE) == 0);

		/*
		 * if this is a read and the page is PG_FAKE,
		 * or this was a successful write,
		 * mark the page PG_CLEAN and not PG_FAKE.
		 */

		if ((pgs[i]->flags & PG_FAKE) || (write && error != ENOMEM)) {
			pmap_clear_reference(pgs[i]);
			pmap_clear_modify(pgs[i]);
			pgs[i]->flags |= PG_CLEAN;
			pgs[i]->flags &= ~PG_FAKE;
		}
		uvm_pageactivate(pg);
		if (swap) {
			if (pg->pqflags & PQ_ANON) {
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
	uvm_page_unbusy(pgs, npages);
	if (!swap) {
		simple_unlock(&uobj->vmobjlock);
	}

	s = splbio();
	if (write && (bp->b_flags & B_AGE) != 0) {
		vwakeup(bp);
	}
	pool_put(&bufpool, bp);
	splx(s);
}