/*	$NetBSD: uvm_pager.c,v 1.27 2000/03/30 02:49:55 simonb Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_uvmhist.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGER
#include <uvm/uvm.h>

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
};

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE	(4 * 1024 * 1024)
vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */


/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, 0, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}
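
/*
 * Example (not part of the original source): a minimal sketch of a
 * pgo_init hook as a pager might supply one.  The pager name and lock
 * are hypothetical; the only contract uvm_pager_init() relies on is
 * that pgo_init, if non-NULL, is safe to call once at boot before any
 * paging activity.
 */
#if 0
static simple_lock_data_t example_pager_lock;	/* hypothetical pager state */

static void
example_pager_init()
{
	/* one-time setup; called from uvm_pager_init() at boot */
	simple_lock_init(&example_pager_lock);
}
#endif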

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 *
 * XXX It would be nice to know the direction of the I/O, so that we can
 * XXX map only what is necessary.
 */

vaddr_t
uvm_pagermapin(pps, npages, aiop, waitf)
	struct vm_page **pps;
	int npages;
	struct uvm_aiodesc **aiop;	/* OUT */
	int waitf;
{
	vsize_t size;
	vaddr_t kva;
	struct uvm_aiodesc *aio;
	vaddr_t cva;
	struct vm_page *pp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, waitf=%d)",
	    pps, npages, aiop, waitf);

ReStart:
	if (aiop) {
		MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP, waitf);
		if (aio == NULL)
			return(0);
		*aiop = aio;
	} else {
		aio = NULL;
	}

	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	    UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if (waitf == M_NOWAIT) {
			if (aio)
				FREE(aio, M_TEMP);
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map",0);
		goto ReStart;
	}

	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
#ifdef DEBUG
		if ((pp->flags & PG_BUSY) == 0)
			panic("uvm_pagermapin: page not busy");
#endif

		/*
		 * XXX VM_PROT_DEFAULT includes VM_PROT_EXEC; is that
		 * XXX really necessary?  It could lead to unnecessary
		 * XXX instruction cache flushes.
		 */
		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    VM_PROT_DEFAULT, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
	}

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}
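
/*
 * Example (not part of the original source): the intended pairing of
 * uvm_pagermapin() and uvm_pagermapout().  "do_io" stands in for whatever
 * device or swap I/O the caller performs on the mapped KVA; the pages are
 * assumed to already be PG_BUSY, per the DEBUG check above.
 */
#if 0
static int
example_pager_io(struct vm_page **pps, int npages)
{
	vaddr_t kva;

	/* reserve pager_map KVA and enter mappings; may sleep (M_WAITOK) */
	kva = uvm_pagermapin(pps, npages, NULL, M_WAITOK);
	if (kva == 0)		/* only possible with M_NOWAIT */
		return(KERN_RESOURCE_SHORTAGE);

	do_io(kva, npages << PAGE_SHIFT);	/* hypothetical I/O step */

	/* tear down mappings and wake anyone waiting for pager_map space */
	uvm_pagermapout(kva, npages);
	return(KERN_SUCCESS);
}
#endif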

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the KVA mappings and free the map entry, waking up anyone
 * waiting for pager_map space.
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	vm_map_lock(pager_map);
	(void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *	PGO_ALLPAGES:	all pages in object are valid targets
 *	!PGO_ALLPAGES:	use "lo" and "hi" to limit range of cluster
 *	PGO_DOACTCLUST:	include active pages in cluster.
 *	NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *	PG_CLEANCHK is only a hint, but clearing will help reduce
 *	the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;	/* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	voff_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	voff_t lo, hi, curoff;
	int center_idx, forward;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
#ifdef DIAGNOSTIC
		printf("uvm_mk_pcluster: provided page array too small (fixed)\n");
#endif
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward = 0 ; forward <= 1 ; forward++) {

		curoff = center->offset + (forward ? PAGE_SIZE : -PAGE_SIZE);
		for ( ;(forward == 0 && curoff >= lo) ||
		    (forward && curoff < hi);
		    curoff += (forward ? 1 : -1) << PAGE_SHIFT) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL)
				break;			/* no page */
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0)
					/* don't want mapped pages at all */
					break;

				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					    == PG_CLEAN &&
					    pmap_is_modified(pclust))
						pclust->flags &= ~PG_CLEAN;

					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}
			/* is the page available for cleaning and does it need it? */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0)
				break;	/* page is already clean or is busy */

			/* yes!  enroll the page in our array */
			pclust->flags |= PG_BUSY;		/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");
			/* XXX: protect wired page?  see above comment. */
			pmap_page_protect(pclust, VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			*npages = *npages + 1;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}
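
/*
 * Example (not part of the original source): how a pager-level caller
 * might satisfy the uvm_mk_pcluster() contract documented above.  The
 * cluster size and the function name are hypothetical; the point is the
 * contract itself: object and page queues locked, center page busy and
 * write-protected, and every returned page unbusied by the caller.
 */
#if 0
#define EXAMPLE_CLUSTER	16		/* hypothetical max cluster size */

static void
example_cluster_put(struct uvm_object *uobj, struct vm_page *center)
{
	struct vm_page *pages[EXAMPLE_CLUSTER], **ppsp;
	int npages = EXAMPLE_CLUSTER;

	/* caller obligations: both locks held, center busy/write-protected */
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	center->flags |= PG_BUSY;
	UVM_PAGE_OWN(center, "example_cluster_put");
	pmap_page_protect(center, VM_PROT_READ);

	ppsp = uvm_mk_pcluster(uobj, pages, &npages, center,
	    PGO_ALLPAGES, 0, 0);

	/* ... start I/O on ppsp[0..npages-1], then unbusy each page ... */

	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
}
#endif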


/*
 * uvm_shareprot: generic share protect routine
 *
 * => caller must lock map entry's map
 * => caller must lock object pointed to by map entry
 */

void
uvm_shareprot(entry, prot)
	vm_map_entry_t entry;
	vm_prot_t prot;
{
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct vm_page *pp;
	voff_t start, stop;
	UVMHIST_FUNC("uvm_shareprot"); UVMHIST_CALLED(maphist);

	if (UVM_ET_ISSUBMAP(entry))
		panic("uvm_shareprot: non-object attached");

	start = entry->offset;
	stop = start + (entry->end - entry->start);

	/*
	 * traverse list of pages in object.  if page in range, pmap_prot it
	 */

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) {
		if (pp->offset >= start && pp->offset < stop)
			pmap_page_protect(pp, prot);
	}
	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
}
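
/*
 * Example (not part of the original source): a hypothetical caller
 * downgrading a shared mapping to read-only.  Per the comment above, the
 * entry's map is assumed to already be locked by the caller; the object
 * lock is taken here for illustration.
 */
#if 0
static void
example_downgrade(vm_map_entry_t entry)
{
	simple_lock(&entry->object.uvm_obj->vmobjlock);
	uvm_shareprot(entry, VM_PROT_READ);	/* strip write access */
	simple_unlock(&entry->object.uvm_obj->vmobjlock);
}
#endif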

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.  this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
 *	for swap-backed memory, "pg" can be NULL if there is no page
 *	of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *	if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result != VM_PAGER_PEND.  in all other cases
 *		we return with uobj unlocked.  [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.  thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	voff_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;

	/*
	 * note that uobj is null if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;  /* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.  the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.  if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages,
		    flags & PGO_SYNCIO);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags & PGO_SYNCIO);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 *	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 * i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.
	 * for transient errors, drop to a cluster of 1 page ("pg")
	 * and try again.  for hard errors, don't bother retrying.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj) {
			simple_lock(&uobj->vmobjlock);
		}
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);

		/*
		 * for failed swap-backed pageouts with a "pg",
		 * we need to reset pg's swslot to either:
		 * "swblk" (for transient errors, so we can retry),
		 * or 0 (for hard errors).
		 */

		if (uobj == NULL && pg != NULL) {
			int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0;
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = nswblk;
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT,
				    nswblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
		if (result == VM_PAGER_AGAIN) {

			/*
			 * for transient failures, free all the swslots that
			 * we're not going to retry with.
			 */

			if (uobj == NULL) {
				if (pg) {
					uvm_swap_free(swblk + 1, *npages - 1);
				} else {
					uvm_swap_free(swblk, *npages);
				}
			}
			if (pg) {
				ppsp[0] = pg;
				*npages = 1;
				goto ReTry;
			}
		} else if (uobj == NULL) {

			/*
			 * for hard errors on swap-backed pageouts,
			 * mark the swslots as bad.  note that we do not
			 * free swslots that we mark bad.
			 */

			uvm_swap_markbad(swblk, *npages);
		}
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).  give up!  the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}
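
/*
 * Example (not part of the original source): a caller honoring the
 * uvm_pager_put() return-state contract for the object-backed case.
 * The function name and flag combination are hypothetical; what matters
 * is that the page queues are locked going in, and that uobj is only
 * still locked on return when PGO_PDFREECLUST was requested and the
 * result is not VM_PAGER_PEND (return-state item #3 above).
 */
#if 0
static void
example_pageout(struct uvm_object *uobj, struct vm_page *pg,
    struct vm_page **pps, int npages)
{
	struct vm_page **ppsp = pps;
	int result;

	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	result = uvm_pager_put(uobj, pg, &ppsp, &npages,
	    PGO_ALLPAGES|PGO_PDFREECLUST|PGO_SYNCIO, 0, 0);
	/* page queues now unlocked (return-state item #2) */

	if (result != VM_PAGER_PEND)
		/* uobj still locked (item #3): finish up and unlock it */
		simple_unlock(&uobj->vmobjlock);
}
#endif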

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.  we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *	PGO_REALLOCSWAP: drop previously allocated swap slots for
 *	clustered swap-backed pages (except for "pg" if !NULL)
 *	"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *	[only meaningful if swap-backed (uobj == NULL)]
 */

void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		if (ppsp[lcv] == pg)	/* skip "pg" */
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED)
			/* still holding obj lock */
			wakeup(ppsp[lcv]);

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(ppsp[lcv], VM_PROT_NONE);
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg "
				    "function");
#endif
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died "
				    "with active page");
#endif
			/* only unlock the object if it is still alive... */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?  we
			 * are currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout, update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(ppsp[lcv]);
			pmap_clear_modify(ppsp[lcv]);
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}
	}
}