/*	$NetBSD: uvm_pager.c,v 1.13.2.2 1999/02/25 04:23:54 chs Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_pmap_new.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGER
#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops aobj_pager;
extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;
#ifdef UBC
extern struct uvm_pagerops ubc_pager;
#endif

struct uvm_pagerops *uvmpagerops[] = {
        &aobj_pager,
        &uvm_deviceops,
        &uvm_vnodeops,
#ifdef UBC
        &ubc_pager,
#endif
};

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE  (4 * 1024 * 1024)
vm_map_t pager_map;             /* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;     /* locked by pager map */
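
/*
 * (with the common 4 KB page size, PAGER_MAP_SIZE above gives room for
 * 1024 simultaneously mapped pages; the exact count is machine
 * dependent since PAGE_SIZE varies.)
 */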


/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
        int lcv;

        /*
         * init pager map
         */

        pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
            PAGER_MAP_SIZE, FALSE, FALSE, NULL);
        simple_lock_init(&pager_map_wanted_lock);
        pager_map_wanted = FALSE;

        /*
         * init ASYNC I/O queue
         */

        TAILQ_INIT(&uvm.aio_done);
        uvm_aiobuf_pool = pool_create(sizeof(struct uvm_aiobuf),
            0, 0, 0, "aiobuf", 0, NULL, NULL, 0);


        /*
         * call pager init functions
         */
        for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
            lcv++) {
                if (uvmpagerops[lcv]->pgo_init)
                        uvmpagerops[lcv]->pgo_init();
        }
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(pps, npages, aiop, waitf)
        struct vm_page **pps;
        int npages;
        struct uvm_aiodesc **aiop;      /* OUT */
        int waitf;
{
        vsize_t size;
        vaddr_t kva;
        struct uvm_aiodesc *aio;
#if !defined(PMAP_NEW)
        vaddr_t cva;
        struct vm_page *pp;
#endif
        UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, waitf=%d)",
            pps, npages, aiop, waitf);

ReStart:
        if (aiop) {
                MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP, waitf);
                if (aio == NULL)
                        return(0);
                *aiop = aio;
        } else {
                aio = NULL;
        }

        size = npages << PAGE_SHIFT;
        kva = 0;                        /* let system choose VA */

        if (uvm_map(pager_map, &kva, size, NULL,
            UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
                if (waitf == M_NOWAIT) {
                        if (aio)
                                FREE(aio, M_TEMP);
                        UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
                        return(0);
                }
                simple_lock(&pager_map_wanted_lock);
                pager_map_wanted = TRUE;
                UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
                UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
                    "pager_map",0);
                goto ReStart;
        }

#if defined(PMAP_NEW)
        /*
         * XXX: (ab)using the pmap module to store state info for us.
         * (pmap stores the PAs... we fetch them back later and convert back
         * to pages with PHYS_TO_VM_PAGE).
         */
        pmap_kenter_pgs(kva, pps, npages);

#else /* PMAP_NEW */

        /* got it */
        for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
                pp = *pps++;
#ifdef DEBUG
                if ((pp->flags & PG_BUSY) == 0)
                        panic("uvm_pagermapin: page not busy");
#endif

                pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
                    VM_PROT_DEFAULT, TRUE);
        }

#endif /* PMAP_NEW */

        UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
        return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our mappings by hand and then remove the map entry
 * (waking up anyone waiting for the space).
 */

void
uvm_pagermapout(kva, npages)
        vaddr_t kva;
        int npages;
{
        vsize_t size = npages << PAGE_SHIFT;
        vm_map_entry_t entries;
        UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

        /*
         * duplicate uvm_unmap, but add in pager_map_wanted handling.
         */

        vm_map_lock(pager_map);
        (void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
        simple_lock(&pager_map_wanted_lock);
        if (pager_map_wanted) {
                pager_map_wanted = FALSE;
                wakeup(pager_map);
        }
        simple_unlock(&pager_map_wanted_lock);
        vm_map_unlock(pager_map);
        if (entries)
                uvm_unmap_detach(entries, 0);

        UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
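
/*
 * usage sketch (illustration only, not part of the original source):
 * a pager about to do I/O on "npages" busy pages in "pps" might pair
 * uvm_pagermapin/uvm_pagermapout like this; the error handling shown
 * is a hypothetical example, not a required convention:
 *
 *      vaddr_t kva;
 *
 *      kva = uvm_pagermapin(pps, npages, NULL, M_NOWAIT);
 *      if (kva == 0)
 *              return(VM_PAGER_AGAIN); (no pager_map KVA free right now)
 *      ... start I/O on [kva, kva + (npages << PAGE_SHIFT)) and wait ...
 *      uvm_pagermapout(kva, npages);
 */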

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *      PGO_ALLPAGES:  all pages in object are valid targets
 *      !PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *      PGO_DOACTCLUST: include active pages in cluster.
 *      NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *              PG_CLEANCHK is only a hint, but clearing will help reduce
 *              the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
        struct uvm_object *uobj;        /* IN */
        struct vm_page **pps, *center;  /* IN/OUT, IN */
        int *npages, flags;             /* IN/OUT, IN */
        vaddr_t mlo, mhi;               /* IN (if !PGO_ALLPAGES) */
{
        struct vm_page **ppsp, *pclust;
        vaddr_t lo, hi, curoff;
        int center_idx, forward, incr;
        UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

        /*
         * center page should already be busy and write protected.  XXX:
         * suppose page is wired?  if we lock, then a process could
         * fault/block on it.  if we don't lock, a process could write the
         * pages in the middle of an I/O.  (consider an msync()).  let's
         * lock it for now (better to delay than corrupt data?).
         */

        /*
         * get cluster boundaries, check sanity, and apply our limits as well.
         */

        uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
        if ((flags & PGO_ALLPAGES) == 0) {
                if (lo < mlo)
                        lo = mlo;
                if (hi > mhi)
                        hi = mhi;
        }
        if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
#ifdef DIAGNOSTIC
                printf("uvm_mk_pcluster: provided page array too small (fixed)\n");
#endif
                pps[0] = center;
                *npages = 1;
                return(pps);
        }

        /*
         * now determine the center and attempt to cluster around the
         * edges
         */

        center_idx = (center->offset - lo) >> PAGE_SHIFT;
        pps[center_idx] = center;       /* plug in the center page */
        ppsp = &pps[center_idx];
        *npages = 1;

        /*
         * attempt to cluster around the left [backward], and then
         * the right side [forward].
         *
         * note that for inactive pages (pages that have been deactivated)
         * there are no valid mappings and PG_CLEAN should be up to date.
         * [i.e. there is no need to query the pmap with pmap_is_modified
         * since there are no mappings].
         */

        for (forward = 0 ; forward <= 1 ; forward++) {
                incr = forward ? PAGE_SIZE : -PAGE_SIZE;
                curoff = center->offset + incr;
                for ( ;(forward == 0 && curoff >= lo) ||
                    (forward && curoff < hi);
                    curoff += incr) {

                        pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
                        if (pclust == NULL) {
                                break;                  /* no page */
                        }
                        /* handle active pages */
                        /* NOTE: inactive pages don't have pmap mappings */
                        if ((pclust->pqflags & PQ_INACTIVE) == 0) {
                                if ((flags & PGO_DOACTCLUST) == 0) {
                                        /* don't want mapped pages at all */
                                        break;
                                }

                                /* make sure "clean" bit is sync'd */
                                if ((pclust->flags & PG_CLEANCHK) == 0) {
                                        if ((pclust->flags & (PG_CLEAN|PG_BUSY))
                                            == PG_CLEAN &&
                                            pmap_is_modified(PMAP_PGARG(pclust)))
                                                pclust->flags &= ~PG_CLEAN;
                                        /* now checked */
                                        pclust->flags |= PG_CLEANCHK;
                                }
                        }

                        /* is page available for cleaning and does it need it */
                        if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0) {
                                break;  /* page is already clean or is busy */
                        }

#ifdef UBC
                        /* XXX assumes blkno in units of DEV_BSIZE */
                        /* check physical adjacency too */
                        if (pclust->blkno != center->blkno +
                            ((pclust->offset - center->offset) >> DEV_BSHIFT)) {
                                break;
                        }
#endif

                        /* yes!  enroll the page in our array */
                        pclust->flags |= PG_BUSY;               /* busy! */
                        UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");

                        /* XXX: protect wired page?  see above comment. */
                        pmap_page_protect(PMAP_PGARG(pclust), VM_PROT_READ);
                        if (!forward) {
                                ppsp--;                 /* back up one page */
                                *ppsp = pclust;
                        } else {
                                /* move forward one page */
                                ppsp[*npages] = pclust;
                        }
                        (*npages)++;
                }
        }

        /*
         * done!  return the cluster array to the caller!!!
         */

        UVMHIST_LOG(maphist, "<- done",0,0,0,0);
        return(ppsp);
}
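
/*
 * hookup sketch (illustration only, not from the original source): a
 * pager opts into generic clustering by pointing the pgo_mk_pcluster
 * hook of its uvm_pagerops at the function above; the struct below is
 * abbreviated and hypothetical:
 *
 *      struct uvm_pagerops example_pagerops = {
 *              ...
 *              uvm_mk_pcluster,        (the pgo_mk_pcluster hook)
 *              ...
 *      };
 *
 * leaving the hook NULL instead disables clustering entirely, per
 * option [1] in the comment above.
 */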


/*
 * uvm_shareprot: generic share protect routine
 *
 * => caller must lock map entry's map
 * => caller must lock object pointed to by map entry
 */

void
uvm_shareprot(entry, prot)
        vm_map_entry_t entry;
        vm_prot_t prot;
{
        struct uvm_object *uobj = entry->object.uvm_obj;
        struct vm_page *pp;
        vaddr_t start, stop;
        UVMHIST_FUNC("uvm_shareprot"); UVMHIST_CALLED(maphist);

        if (UVM_ET_ISSUBMAP(entry))
                panic("uvm_shareprot: non-object attached");

        start = entry->offset;
        stop = start + (entry->end - entry->start);

        /*
         * traverse list of pages in object.  if page in range, pmap_prot it
         */

        for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) {
                if (pp->offset >= start && pp->offset < stop)
                        pmap_page_protect(PMAP_PGARG(pp), prot);
        }
        UVMHIST_LOG(maphist, "<- done",0,0,0,0);
}

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *    backing it.  this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN;
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *    for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *      PGO_ALLPAGES: all pages in uobj are valid targets
 *      PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *      PGO_SYNCIO: do SYNC I/O (no async)
 *      PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *    if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *      1. we return the VM_PAGER status code of the pageout
 *      2. we return with the page queues unlocked
 *      3. if (uobj != NULL) [!swap_backed] we return with
 *         uobj locked _only_ if PGO_PDFREECLUST is set
 *         AND result != VM_PAGER_PEND.  in all other cases
 *         we return with uobj unlocked.  [this is a hack
 *         that allows the pagedaemon to save one lock/unlock
 *         pair in the !swap_backed case since we have to
 *         lock the uobj to drop the cluster anyway]
 *      4. on errors we always drop the cluster.  thus, if we return
 *         !PEND, !OK, then the caller only has to worry about
 *         un-busying the main page (not the cluster pages).
 *      5. on success, if !PGO_PDFREECLUST, we return the cluster
 *         with all pages busy (caller must un-busy and check
 *         wanted/released flags).
 */

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
        struct uvm_object *uobj;        /* IN */
        struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
        int *npages;                    /* IN/OUT */
        int flags;                      /* IN */
        vaddr_t start, stop;            /* IN, IN */
{
        int result;
        daddr_t swblk;
        struct vm_page **ppsp = *ppsp_ptr;

        /*
         * note that uobj is null if we are doing a swap-backed pageout.
         * note that uobj is !null if we are doing normal object pageout.
         * note that the page queues must be locked to cluster.
         */

        if (uobj) {     /* if !swap-backed */

                /*
                 * attempt to build a cluster for pageout using its
                 * make-put-cluster function (if it has one).
                 */

                if (uobj->pgops->pgo_mk_pcluster) {
                        ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
                            npages, pg, flags, start, stop);
                        *ppsp_ptr = ppsp;  /* update caller's pointer */
                } else {
                        ppsp[0] = pg;
                        *npages = 1;
                }

                swblk = 0;              /* XXX: keep gcc happy */

        } else {

                /*
                 * for swap-backed pageout, the caller (the pagedaemon) has
                 * already built the cluster for us.  the starting swap
                 * block we are writing to has been passed in as "start."
                 * "pg" could be NULL if there is no page we are especially
                 * interested in (in which case the whole cluster gets dropped
                 * in the event of an error or a sync "done").
                 */
                swblk = (daddr_t) start;
                /* ppsp and npages should be ok */
        }

        /* now that we've clustered we can unlock the page queues */
        uvm_unlock_pageq();

        /*
         * now attempt the I/O.  if we have a failure and we are
         * clustered, we will drop the cluster and try again.
         */

ReTry:
        if (uobj) {
                /* object is locked */
                simple_lock_assert(&uobj->vmobjlock, SLOCK_LOCKED);
                result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
                /* object is now unlocked */
                simple_lock_assert(&uobj->vmobjlock, SLOCK_UNLOCKED);
        } else {
                /* nothing locked */
                /* XXX should we pass more than just PGO_SYNCIO here too? */
                result = uvm_swap_put(swblk, ppsp, *npages, flags & PGO_SYNCIO);
                /* nothing locked */
        }

        /*
         * we have attempted the I/O.
         *
         * if the I/O was a success then:
         *      if !PGO_PDFREECLUST, we return the cluster to the
         *              caller (who must un-busy all pages)
         *      else we un-busy cluster pages for the pagedaemon
         *
         * if I/O is pending (async i/o) then we return the pending code.
         * [in this case the async i/o done function must clean up when
         * i/o is done...]
         */

        if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
                if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
                        /*
                         * drop cluster and relock object (only if I/O is
                         * not pending)
                         */
                        if (uobj)
                                /* required for dropcluster */
                                simple_lock(&uobj->vmobjlock);
                        if (*npages > 1 || pg == NULL)
                                uvm_pager_dropcluster(uobj, pg, ppsp, npages,
                                    PGO_PDFREECLUST, 0);
                        /* if (uobj): object still locked, as per
                         * return-state item #3 */
                }
                if (result == VM_PAGER_PEND) {
                        /* XXX uvmexp.paging needs spinlock */
                        /*
                         * XXX also, uvmexp.paging could briefly be negative
                         * if the iodone handler runs before we bump the
                         * counter here.
                         */
                        uvmexp.paging += *npages;
                }
                return (result);
        }

        /*
         * a pager error occurred.  if we have clustered, we drop the
         * cluster and try again.
         */

        if (*npages > 1 || pg == NULL) {
                if (uobj)
                        simple_lock(&uobj->vmobjlock);
                uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP,
                    swblk);
                if (pg != NULL)
                        goto ReTry;
        }

        /*
         * a pager error occurred (even after dropping the cluster, if there
         * was one).  give up!  the caller only has one page ("pg")
         * to worry about.
         */

        if (uobj && (flags & PGO_PDFREECLUST) != 0)
                simple_lock(&uobj->vmobjlock);
        return(result);
}
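
/*
 * call sketch (hypothetical, for illustration only): the pagedaemon
 * would page out a swap-backed cluster it has already assembled in
 * "swpps" ("swnpages" entries, starting at swap block "startblk")
 * roughly as follows; all three names are made up:
 *
 *      struct vm_page **ppsp = swpps;
 *
 *      uvm_lock_pageq();
 *      result = uvm_pager_put(NULL, NULL, &ppsp, &swnpages,
 *          PGO_PDFREECLUST, startblk, 0);
 *
 * on return the page queues are unlocked (return-state item #2), and
 * on VM_PAGER_OK the cluster pages have already been un-busied for
 * the pagedaemon because PGO_PDFREECLUST was set.
 */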

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *    locked by the caller.  we return with this object still
 *    locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *      pages on behalf of the pagedaemon.
 *      PGO_REALLOCSWAP: drop previously allocated swap slots for
 *      clustered swap-backed pages (except for "pg" if !NULL)
 *      "swblk" is the start of swap alloc (e.g. for ppsp[0])
 *      [only meaningful if swap-backed (uobj == NULL)]
 */


void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags, swblk)
        struct uvm_object *uobj;        /* IN */
        struct vm_page *pg, **ppsp;     /* IN, IN/OUT */
        int *npages;                    /* IN/OUT */
        int flags;
        int swblk;                      /* valid if (uobj == NULL &&
                                           PGO_REALLOCSWAP) */
{
        int lcv;
        boolean_t obj_is_alive;
        struct uvm_object *saved_uobj;

        /*
         * if we need to reallocate swap space for the cluster we are dropping
         * (true if swap-backed and PGO_REALLOCSWAP) then free the old
         * allocation now.  save a block for "pg" if it is non-NULL.
         *
         * note that we will zap the object's pointer to swap in the "for" loop
         * below...
         */

        if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
                if (pg)
                        uvm_swap_free(swblk + 1, *npages - 1);
                else
                        uvm_swap_free(swblk, *npages);
        }

        /*
         * drop all pages but "pg"
         */

        for (lcv = 0 ; lcv < *npages ; lcv++) {

                /* skip "pg" or empty slot */
                if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
                        continue;

                /*
                 * if swap-backed, gain lock on object that owns page.  note
                 * that PQ_ANON bit can't change as long as we are holding
                 * the PG_BUSY bit (so there is no need to lock the page
                 * queues to test it).
                 *
                 * once we have the lock, dispose of the pointer to swap, if
                 * requested
                 */
                if (!uobj) {
                        if (ppsp[lcv]->pqflags & PQ_ANON) {
                                simple_lock(&ppsp[lcv]->uanon->an_lock);
                                if (flags & PGO_REALLOCSWAP)
                                        /* zap swap block */
                                        ppsp[lcv]->uanon->an_swslot = 0;
                        } else {
                                simple_lock(&ppsp[lcv]->uobject->vmobjlock);
                                if (flags & PGO_REALLOCSWAP)
                                        uao_set_swslot(ppsp[lcv]->uobject,
                                            ppsp[lcv]->offset >> PAGE_SHIFT, 0);
                        }
                }

                /* did someone want the page while we had it busy-locked? */
                if (ppsp[lcv]->flags & PG_WANTED) {
                        /* still holding obj lock */
                        wakeup(ppsp[lcv]);
                }

                /* if page was released, release it.  otherwise un-busy it */
                if (ppsp[lcv]->flags & PG_RELEASED) {

                        if (ppsp[lcv]->pqflags & PQ_ANON) {
                                /* so that anfree will free */
                                ppsp[lcv]->flags &= ~(PG_BUSY);
                                UVM_PAGE_OWN(ppsp[lcv], NULL);

                                pmap_page_protect(PMAP_PGARG(ppsp[lcv]),
                                    VM_PROT_NONE);      /* be safe */
                                simple_unlock(&ppsp[lcv]->uanon->an_lock);
                                /* kills anon and frees pg */
                                uvm_anfree(ppsp[lcv]->uanon);

                                continue;
                        }

                        /*
                         * pgo_releasepg will dump the page for us
                         */

#ifdef DIAGNOSTIC
                        if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
                                panic("uvm_pager_dropcluster: no releasepg "
                                    "function");
#endif
                        saved_uobj = ppsp[lcv]->uobject;
                        obj_is_alive =
                            saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
                        /* for normal objects, "pg" is still PG_BUSY by us,
                         * so obj can't die */
                        if (uobj && !obj_is_alive)
                                panic("uvm_pager_dropcluster: object died "
                                    "with active page");
#endif
                        /* only unlock the object if it is still alive... */
                        if (obj_is_alive && saved_uobj != uobj)
                                simple_unlock(&saved_uobj->vmobjlock);

                        /*
                         * XXXCDC: suppose uobj died in the pgo_releasepg?
                         * how do we pass that info up to the caller?  we
                         * are currently ignoring it...
                         */

                        continue;               /* next page */

                } else {
                        ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED);
                        UVM_PAGE_OWN(ppsp[lcv], NULL);
                }

                /*
                 * if we are operating on behalf of the pagedaemon and we
                 * had a successful pageout update the page!
                 */
                if (flags & PGO_PDFREECLUST) {
                        /* XXX: with PMAP_NEW ref should already be clear,
                         * but don't trust! */
                        pmap_clear_reference(PMAP_PGARG(ppsp[lcv]));
                        pmap_clear_modify(PMAP_PGARG(ppsp[lcv]));
                        ppsp[lcv]->flags |= PG_CLEAN;
                }

                /* if anonymous cluster, unlock object and move on */
                if (!uobj) {
                        if (ppsp[lcv]->pqflags & PQ_ANON)
                                simple_unlock(&ppsp[lcv]->uanon->an_lock);
                        else
                                simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
                }

        }

        /*
         * drop to a cluster of 1 page ("pg") if requested
         */

        if (pg && (flags & PGO_PDFREECLUST) == 0) {
                /*
                 * if we are not a successful pageout, we make a 1 page cluster.
                 */
                ppsp[0] = pg;
                *npages = 1;

                /*
                 * assign new swap block to new cluster, if anon backed
                 */
                if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
                        if (pg->pqflags & PQ_ANON) {
                                simple_lock(&pg->uanon->an_lock);
                                pg->uanon->an_swslot = swblk;   /* reassign */
                                simple_unlock(&pg->uanon->an_lock);
                        } else {
                                simple_lock(&pg->uobject->vmobjlock);
                                uao_set_swslot(pg->uobject,
                                    pg->offset >> PAGE_SHIFT, swblk);
                                simple_unlock(&pg->uobject->vmobjlock);
                        }
                }
        }
}

void
uvm_aio_biodone(bp)
        struct buf *bp;
{
        struct uvm_aiobuf *abp = (void *)bp;
        int s;

        s = splbio();
        simple_lock(&uvm.pagedaemon_lock);      /* locks uvm.aio_done */
        TAILQ_INSERT_TAIL(&uvm.aio_done, &abp->aio, aioq);
        wakeup(&uvm.pagedaemon);
        simple_unlock(&uvm.pagedaemon_lock);
        splx(s);
}
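
/*
 * note on the async completion path: uvm_aio_biodone() above runs at
 * biodone time, queues the finished aio descriptor on uvm.aio_done,
 * and wakes the pagedaemon.  the pagedaemon later dequeues the
 * descriptor and calls uvm_aio_aiodone() below in its own context to
 * unmap the pages and drop the cluster.
 */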

void
uvm_aio_aiodone(aio)
        struct uvm_aiodesc *aio;
{
        struct uvm_aiobuf *abp = aio->pd_ptr;
        struct vm_page *pgs[aio->npages];
        struct uvm_object *uobj;
        struct vnode *vp;
        int s, i;

        for (i = 0; i < aio->npages; i++) {
                pgs[i] = uvm_pageratop(aio->kva + (i << PAGE_SHIFT));
        }
        uvm_pagermapout(aio->kva, aio->npages);

        uobj = pgs[0]->uobject;
        vp = (struct vnode *)uobj;

        /* XXX why don't we need to do this? */
#if 0
        vp->v_numoutput--;
        if (vp->v_flag & VBWAIT && vp->v_numoutput == 0) {
                vp->v_flag &= ~VBWAIT;
                wakeup(&vp->v_numoutput);
        }
        if (vp->v_numoutput < 0) {
                panic("uvn_aiodone: neg numoutput vp %p", vp);
        }
#endif

        uvm_pager_dropcluster(uobj, NULL, pgs, &aio->npages,
            PGO_PDFREECLUST, 0);

        s = splbio();
        pool_put(uvm_aiobuf_pool, abp);
        splx(s);
}