/*	$NetBSD: uvm_pager.c,v 1.16.4.3 1999/07/04 02:02:32 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_pmap_new.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGER
#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops aobj_pager;
extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;
extern struct uvm_pagerops ubc_pager;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE	(4 * 1024 * 1024)
vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */


/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, 0, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 *
 * XXX It would be nice to know the direction of the I/O, so that we can
 * XXX map only what is necessary.
 */
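
/*
 * usage sketch (illustrative only, not from the original sources): a
 * pager doing I/O on a cluster of busy pages brackets the device I/O
 * with the mapin/mapout pair.  "pager_do_io" below is a hypothetical
 * stand-in for the pager's actual I/O routine:
 *
 *	kva = uvm_pagermapin(pps, npages, M_WAITOK);
 *	error = pager_do_io(kva, npages << PAGE_SHIFT);
 *	uvm_pagermapout(kva, npages);
 */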

vaddr_t
uvm_pagermapin(pps, npages, waitf)
	struct vm_page **pps;
	int npages;
	int waitf;
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, waitf=%d)",
	    pps, npages, waitf, 0);

ReStart:
	size = npages << PAGE_SHIFT;
	kva = NULL;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	    UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if (waitf == M_NOWAIT) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map",0);
		goto ReStart;
	}

	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
#ifdef DEBUG
		if ((pp->flags & PG_BUSY) == 0)
			panic("uvm_pagermapin: page not busy");
#endif

		/*
		 * XXX VM_PROT_DEFAULT includes VM_PROT_EXEC; is that
		 * XXX really necessary?  It could lead to unnecessary
		 * XXX instruction cache flushes.
		 */
		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    VM_PROT_DEFAULT, TRUE,
		    VM_PROT_READ | VM_PROT_WRITE);
	}

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our mappings by hand and then remove the map entry (waking
 * up anyone waiting for the space).
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	vm_map_lock(pager_map);
	(void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *	PGO_ALLPAGES:	all pages in object are valid targets
 *	!PGO_ALLPAGES:	use "lo" and "hi" to limit range of cluster
 *	PGO_DOACTCLUST:	include active pages in cluster.
 *	NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *		PG_CLEANCHK is only a hint, but clearing will help reduce
 *		the number of calls we make to the pmap layer.
 */
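
/*
 * worked example (an illustration, not from the original sources;
 * assumes PAGE_SIZE == 0x1000): if pgo_cluster() returns the range
 * [0x1000, 0x6000) around a center page at offset 0x3000, then
 * center_idx is 2 and we cluster outward from pps[2]:
 *
 *	index:    0       1       2       3        4
 *	pps:   [0x1000, 0x2000, 0x3000, 0x4000,  (none) ]
 *	        ^returned ppsp   ^center          ^e.g. stopped early
 *	                                           on a busy page
 *
 * the return value points at the first enrolled page (here &pps[0])
 * and *npages is set to the number of pages enrolled (here 4).
 */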

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;	/* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	vaddr_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	vaddr_t lo, hi, curoff;
	int center_idx, forward, incr;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) {  /* pps too small, bail out! */
#ifdef DIAGNOSTIC
		printf("uvm_mk_pcluster uobj %p npages %d lo 0x%lx hi 0x%lx flags 0x%x\n",
		    uobj, *npages, lo, hi, flags);
#endif
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward = 0 ; forward <= 1 ; forward++) {
		incr = forward ? PAGE_SIZE : -PAGE_SIZE;
		curoff = center->offset + incr;
		for ( ;(forward == 0 && curoff >= lo) ||
		    (forward && curoff < hi);
		    curoff += incr) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL) {
				break;			/* no page */
			}
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0) {
					/* don't want mapped pages at all */
					break;
				}

				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					    == PG_CLEAN &&
					    pmap_is_modified(PMAP_PGARG(pclust)))
						pclust->flags &= ~PG_CLEAN;
					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}

			/* is the page available for cleaning and does it need it? */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0) {
				break;	/* page is already clean or is busy */
			}

			/* yes!  enroll the page in our array */
			pclust->flags |= PG_BUSY;		/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");

			/* XXX: protect wired page?  see above comment. */
			pmap_page_protect(PMAP_PGARG(pclust), VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			(*npages)++;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}


/*
 * uvm_shareprot: generic share protect routine
 *
 * => caller must lock map entry's map
 * => caller must lock object pointed to by map entry
 */

void
uvm_shareprot(entry, prot)
	vm_map_entry_t entry;
	vm_prot_t prot;
{
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct vm_page *pp;
	vaddr_t start, stop;
	UVMHIST_FUNC("uvm_shareprot"); UVMHIST_CALLED(maphist);

	if (UVM_ET_ISSUBMAP(entry))
		panic("uvm_shareprot: non-object attached");

	start = entry->offset;
	stop = start + (entry->end - entry->start);

	/*
	 * traverse list of pages in object.  if page in range, pmap_prot it
	 */

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) {
		if (pp->offset >= start && pp->offset < stop)
			pmap_page_protect(PMAP_PGARG(pp), prot);
	}
	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
}

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *    backing it.  this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *    for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *	if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *	   uobj locked _only_ if PGO_PDFREECLUST is set
 *	   AND result != VM_PAGER_PEND.  in all other cases
 *	   we return with uobj unlocked.  [this is a hack
 *	   that allows the pagedaemon to save one lock/unlock
 *	   pair in the !swap_backed case since we have to
 *	   lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.  thus, if we return
 *	   !PEND, !OK, then the caller only has to worry about
 *	   un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *	   with all pages busy (caller must un-busy and check
 *	   wanted/released flags).
 */
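
/*
 * usage sketch (an illustration of a typical caller, not code from the
 * original sources; "MAXCLUSTPAGES" is a hypothetical array size, and
 * uobj, if non-NULL, is assumed to already be locked as required above):
 *
 *	struct vm_page *pps[MAXCLUSTPAGES], **ppsp = pps;
 *	int npages = MAXCLUSTPAGES;
 *
 *	uvm_lock_pageq();
 *	result = uvm_pager_put(uobj, pg, &ppsp, &npages,
 *	    PGO_DOACTCLUST|PGO_PDFREECLUST, start, stop);
 *
 * on return the page queues are unlocked.  with PGO_PDFREECLUST and
 * result == VM_PAGER_OK the cluster pages have already been un-busied,
 * so only "pg" itself remains for the caller to deal with.
 */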

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr; /* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	vaddr_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;

	/*
	 * note that uobj is null  if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;  /* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.  the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.  if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		simple_lock_assert(&uobj->vmobjlock, SLOCK_LOCKED);
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
		/* object is now unlocked */
		simple_lock_assert(&uobj->vmobjlock, SLOCK_UNLOCKED);
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 *	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 * i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST, 0);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.  if we have clustered, we drop the
	 * cluster and try again.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj)
			simple_lock(&uobj->vmobjlock);
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP,
		    swblk);
		if (pg != NULL)
			goto ReTry;
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).  give up!  the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.  we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *	PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */


void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags, swblk)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
	int swblk;			/* valid if (uobj == NULL &&
					   PGO_REALLOCSWAP) */
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * if we need to reallocate swap space for the cluster we are dropping
	 * (true if swap-backed and PGO_REALLOCSWAP) then free the old
	 * allocation now.  save a block for "pg" if it is non-NULL.
	 *
	 * note that we will zap the object's pointer to swap in the "for" loop
	 * below...
	 */

	if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
		if (pg)
			uvm_swap_free(swblk + 1, *npages - 1);
		else
			uvm_swap_free(swblk, *npages);
	}

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		/* skip "pg" or empty slot */
		if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED) {
			/* still holding obj lock */
			wakeup(ppsp[lcv]);
		}

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that uvm_anfree() will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(PMAP_PGARG(ppsp[lcv]),
				    VM_PROT_NONE);	/* be safe */
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg "
				    "function");
#endif
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died "
				    "with active page");
#endif
			/* only unlock the object if it is still alive... */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?  we
			 * are currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED|PG_FAKE);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout, update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(PMAP_PGARG(ppsp[lcv]));
			pmap_clear_modify(PMAP_PGARG(ppsp[lcv]));
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}

	}

	/*
	 * drop to a cluster of 1 page ("pg") if requested
	 */

	if (pg && (flags & PGO_PDFREECLUST) == 0) {
		/*
		 * the pageout was not successful, so we make a 1 page cluster.
		 */
		ppsp[0] = pg;
		*npages = 1;

		/*
		 * assign new swap block to new cluster, if anon backed
		 */
		if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = swblk;	/* reassign */
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT, swblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
}

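/*
 * uvm_aio_biodone1: i/o done hook for the sub-buffers of a multi-buffer
 * async i/o.  transfers any error to the master buffer, retires this
 * sub-buffer's share of the byte count, and calls biodone() on the
 * master buffer once the entire i/o has completed.
 */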
void
uvm_aio_biodone1(bp)
	struct buf *bp;
{
	struct buf *mbp = bp->b_private;

	if (mbp == bp) {
		panic("uvm_aio_biodone1: mbp == bp %p", bp);
	}

	if (bp->b_flags & B_ERROR) {
		mbp->b_flags |= B_ERROR;
		mbp->b_error = bp->b_error;
	}
	mbp->b_bcount -= bp->b_bcount;
	pool_put(&bufpool, bp);
	if (mbp->b_bcount == 0) {
		biodone(mbp);
	}
}

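/*
 * uvm_aio_biodone: i/o done hook for a single-buffer async i/o.  we
 * defer completion to the aiodone daemon: point b_iodone at
 * uvm_aio_aiodone, queue the buffer on the aio_done list, and wake
 * the daemon so it can finish the i/o in thread context.
 */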
void
uvm_aio_biodone(bp)
	struct buf *bp;
{
	/* XXX for single-buf aios */
	bp->b_iodone = uvm_aio_aiodone;

	simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	simple_unlock(&uvm.aiodoned_lock);
}

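/*
 * uvm_aio_aiodone: finish an async i/o: find the vm_pages backing the
 * buffer's KVA, mark them PG_RELEASED if this was a failed async read,
 * unmap the buffer from pager_map, drop the cluster, and free the
 * buffer.
 */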
void
uvm_aio_aiodone(bp)
	struct buf *bp;
{
	int pages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pgs[pages];
	int s, i;
	boolean_t release;

	release = (bp->b_flags & (B_ERROR|B_READ)) == (B_ERROR|B_READ);
	for (i = 0; i < pages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));

		/*
		 * if this is an async read and we got an error,
		 * mark the pages PG_RELEASED so that uvm_pager_dropcluster()
		 * will free them.
		 */

		if (release) {
			pgs[i]->flags |= PG_RELEASED;
		}
	}
	uvm_pagermapout((vaddr_t)bp->b_data, pages);
	uvm_pager_dropcluster((struct uvm_object *)bp->b_vp, NULL, pgs,
	    &pages, PGO_PDFREECLUST, 0);

	s = splbio();
	pool_put(&bufpool, bp);
	splx(s);
}
