/*	$NetBSD: uvm_pager.c,v 1.34 2000/11/24 22:41:39 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_uvmhist.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#define UVM_PAGER
#include <uvm/uvm.h>

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
};
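
/*
 * registration sketch (hypothetical, not code from this file): a new
 * pager would declare its ops and add a pointer to them to the array
 * above, e.g.
 *
 *	extern struct uvm_pagerops example_pagerops;
 *	... &example_pagerops added to uvmpagerops[] ...
 *
 * uvm_pager_init() below then calls each listed pager's pgo_init hook
 * at boot time.
 */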

/*
 * the pager map: provides KVA for I/O
 */

vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */


/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, 0, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */
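
/*
 * usage sketch (a hypothetical caller, not code from this file): a
 * pager doing a synchronous pageout of "npages" busy pages in "pps"
 * might pair uvm_pagermapin and uvm_pagermapout like this:
 *
 *	kva = uvm_pagermapin(pps, npages, NULL, 0);
 *	if (kva == 0)
 *		return(VM_PAGER_AGAIN);	  [pager_map full, can't wait]
 *	... start I/O on the range kva .. kva + (npages << PAGE_SHIFT) ...
 *	uvm_pagermapout(kva, npages);
 *
 * passing UVMPAGER_MAPIN_WAITOK instead of 0 makes uvm_pagermapin sleep
 * for KVA rather than fail, so the zero-return check would then only
 * cover a failed aio descriptor allocation (none is requested here).
 */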

vaddr_t
uvm_pagermapin(pps, npages, aiop, flags)
	struct vm_page **pps;
	int npages;
	struct uvm_aiodesc **aiop;	/* OUT */
	int flags;
{
	vsize_t size;
	vaddr_t kva;
	struct uvm_aiodesc *aio;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, flags=0x%x)",
	    pps, npages, aiop, flags);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	if (aiop) {
		MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP,
		    (flags & UVMPAGER_MAPIN_WAITOK));
		if (aio == NULL)
			return(0);
		*aiop = aio;
	} else {
		aio = NULL;
	}

	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	    UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			if (aio)
				FREE(aio, M_TEMP);
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map",0);
		goto ReStart;
	}

	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
#ifdef DEBUG
		if ((pp->flags & PG_BUSY) == 0)
			panic("uvm_pagermapin: page not busy");
#endif
		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | prot);
	}

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the map entry by hand (duplicating uvm_unmap) so that we
 * can wake up anyone waiting for pager_map space.
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	vm_map_lock(pager_map);
	(void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *	at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *	status if it drops the object lock)
 * => flags:
 *	PGO_ALLPAGES:  all pages in object are valid targets
 *	!PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *	PGO_DOACTCLUST: include active pages in cluster.
 *		NOTE: the caller should clear PG_CLEANCHK bits if
 *		PGO_DOACTCLUST.  PG_CLEANCHK is only a hint, but clearing
 *		will help reduce the number of calls we make to the
 *		pmap layer.
 */
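
/*
 * wiring sketch (hypothetical, not an actual uvm_pagerops from this
 * tree): a pager electing option [2] above simply plugs this function
 * into the pgo_mk_pcluster slot of its uvm_pagerops, e.g.
 *
 *	struct uvm_pagerops example_pagerops = {
 *		...
 *		uvm_mk_pcluster,	[the pgo_mk_pcluster entry]
 *		...
 *	};
 *
 * uvm_pager_put() below then calls it through uobj->pgops, so pagers
 * rarely invoke this function directly.
 */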

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;	/* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	voff_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	voff_t lo, hi, curoff;
	int center_idx, forward;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
#ifdef DIAGNOSTIC
		printf("uvm_mk_pcluster: provided page array too small (fixed)\n");
#endif
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward = 0 ; forward <= 1 ; forward++) {

		curoff = center->offset + (forward ? PAGE_SIZE : -PAGE_SIZE);
		for ( ;(forward == 0 && curoff >= lo) ||
		    (forward && curoff < hi);
		    curoff += (forward ? 1 : -1) << PAGE_SHIFT) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL)
				break;			/* no page */
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0)
					/* don't want mapped pages at all */
					break;

				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					    == PG_CLEAN &&
					    pmap_is_modified(pclust))
						pclust->flags &= ~PG_CLEAN;

					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}
			/* is the page available for cleaning, and does it
			 * need it? */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0)
				break;	/* page is already clean or is busy */

			/* yes!  enroll the page in our array */
			pclust->flags |= PG_BUSY;		/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");
			/* XXX: protect wired page?  see above comment. */
			pmap_page_protect(pclust, VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			*npages = *npages + 1;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.   this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result != VM_PAGER_PEND.   in all other cases
 *		we return with uobj unlocked.   [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.   thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */
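
/*
 * call sketch (hypothetical, following the contract above): to page out
 * a busy, dirty page "pg" of a locked object "uobj", with "pps" a
 * caller-supplied array of "npages" entries for cluster building:
 *
 *	struct vm_page **ppsp = pps;
 *
 *	[lock page queues]
 *	result = uvm_pager_put(uobj, pg, &ppsp, &npages,
 *	    PGO_ALLPAGES|PGO_SYNCIO, 0, 0);
 *	[page queues now unlocked; if result is neither VM_PAGER_OK nor
 *	 VM_PAGER_PEND, only "pg" is still busy]
 *
 * in the swap-backed case uobj is NULL and "start" carries the starting
 * swap block instead of an object offset.
 */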

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr; /* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	voff_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;

	/*
	 * note that uobj is null if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;  /* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.   the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.   if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages,
		    flags & PGO_SYNCIO);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags & PGO_SYNCIO);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 *	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 * i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.
	 * for transient errors, drop to a cluster of 1 page ("pg")
	 * and try again.   for hard errors, don't bother retrying.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj) {
			simple_lock(&uobj->vmobjlock);
		}
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);

		/*
		 * for failed swap-backed pageouts with a "pg",
		 * we need to reset pg's swslot to either:
		 * "swblk" (for transient errors, so we can retry),
		 * or 0 (for hard errors).
		 */

		if (uobj == NULL && pg != NULL) {
			int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0;
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = nswblk;
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT,
				    nswblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
		if (result == VM_PAGER_AGAIN) {

			/*
			 * for transient failures, free all the swslots that
			 * we're not going to retry with.
			 */

			if (uobj == NULL) {
				if (pg) {
					uvm_swap_free(swblk + 1, *npages - 1);
				} else {
					uvm_swap_free(swblk, *npages);
				}
			}
			if (pg) {
				ppsp[0] = pg;
				*npages = 1;
				goto ReTry;
			}
		} else if (uobj == NULL) {

			/*
			 * for hard errors on swap-backed pageouts,
			 * mark the swslots as bad.   note that we do not
			 * free swslots that we mark bad.
			 */

			uvm_swap_markbad(swblk, *npages);
		}
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).  give up!   the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.   we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *	   PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */
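
/*
 * usage sketch (hypothetical; compare the error path of uvm_pager_put
 * above): after a failed clustered pageout to a non-swap-backed object,
 *
 *	simple_lock(&uobj->vmobjlock);
 *	uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);
 *
 * un-busies (or releases) every cluster page except "pg" and returns
 * with the object still locked, leaving only "pg" for the caller to
 * deal with.
 */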

void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		if (ppsp[lcv] == pg)		/* skip "pg" */
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED)
			/* still holding obj lock */
			wakeup(ppsp[lcv]);

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(ppsp[lcv], VM_PROT_NONE);
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg "
				    "function");
#endif
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died "
				    "with active page");
#endif
			/* only unlock the object if it is still alive... */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?  we
			 * are currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout, update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(ppsp[lcv]);
			pmap_clear_modify(ppsp[lcv]);
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}
	}
}