/*	$NetBSD: uvm_pager.c,v 1.31 2000/06/26 14:21:18 mrg Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_uvmhist.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>

#define UVM_PAGER
#include <uvm/uvm.h>

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
};

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE	(4 * 1024 * 1024)
vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */
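
/*
 * note: PAGER_MAP_SIZE is the amount of KVA reserved for pager I/O
 * mappings.  assuming the common 4KB PAGE_SIZE, 4MB of KVA is room for
 * about (4 * 1024 * 1024) / 4096 = 1024 concurrently mapped pages; the
 * exact count is platform dependent since it scales with PAGE_SIZE.
 */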

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, 0, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(pps, npages, aiop, flags)
	struct vm_page **pps;
	int npages;
	struct uvm_aiodesc **aiop;	/* OUT */
	int flags;
{
	vsize_t size;
	vaddr_t kva;
	struct uvm_aiodesc *aio;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, flags=0x%x)",
	    pps, npages, aiop, flags);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	if (aiop) {
		MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP,
		    (flags & UVMPAGER_MAPIN_WAITOK));
		if (aio == NULL)
			return(0);
		*aiop = aio;
	} else {
		aio = NULL;
	}

	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	    UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			if (aio)
				FREE(aio, M_TEMP);
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, " SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map",0);
		goto ReStart;
	}

	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
#ifdef DEBUG
		if ((pp->flags & PG_BUSY) == 0)
			panic("uvm_pagermapin: page not busy");
#endif
		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | prot);
	}

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}
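
/*
 * usage sketch (illustration only; "pps" and "npages" here are
 * hypothetical caller variables): a pager doing a synchronous transfer
 * would bracket the I/O with the pair of calls above, roughly:
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pps, npages, NULL, UVMPAGER_MAPIN_WAITOK);
 *	if (kva == 0)
 *		return(VM_PAGER_AGAIN);		(no KVA available)
 *	... do the I/O on the buffer mapped at "kva" ...
 *	uvm_pagermapout(kva, npages);
 *
 * an async caller would pass a non-NULL "aiop" instead and unmap from
 * its aio-done handler.
 */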

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our mappings by hand and then remove the map entry (waking
 * up anyone waiting for pager_map space).
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	vm_map_lock(pager_map);
	(void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *	PGO_ALLPAGES:  all pages in object are valid targets
 *	!PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *	PGO_DOACTCLUST: include active pages in cluster.
 *	NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *		PG_CLEANCHK is only a hint, but clearing will help reduce
 *		the number of calls we make to the pmap layer.
 */
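
/*
 * for illustration only (hypothetical pager "foo"): option [2] above just
 * means pointing the pager's ops vector at this function, e.g.
 *
 *	struct uvm_pagerops foo_pagerops = {
 *		...
 *		uvm_mk_pcluster,	(the pgo_mk_pcluster entry)
 *		...
 *	};
 *
 * a pager that should never cluster leaves that entry NULL instead.
 */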

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;	/* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	voff_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	voff_t lo, hi, curoff;
	int center_idx, forward;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
#ifdef DIAGNOSTIC
		printf("uvm_mk_pcluster: provided page array too small (fixed)\n");
#endif
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward = 0 ; forward <= 1 ; forward++) {

		curoff = center->offset + (forward ? PAGE_SIZE : -PAGE_SIZE);
		for ( ;(forward == 0 && curoff >= lo) ||
		    (forward && curoff < hi);
		    curoff += (forward ? 1 : -1) << PAGE_SHIFT) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL)
				break;			/* no page */
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0)
					/* don't want mapped pages at all */
					break;

				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					    == PG_CLEAN &&
					    pmap_is_modified(pclust))
						pclust->flags &= ~PG_CLEAN;

					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}
			/* is page available for cleaning and does it need it? */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0)
				break;	/* page is already clean or is busy */

			/* yes!  enroll the page in our array */
			pclust->flags |= PG_BUSY;		/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");
			/* XXX: protect wired page?  see above comment. */
			pmap_page_protect(pclust, VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			*npages = *npages + 1;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.  this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result != VM_PAGER_PEND.  in all other cases
 *		we return with uobj unlocked.  [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.  thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */
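
/*
 * caller-side sketch (hypothetical; the real callers live in the pagers
 * and the pagedaemon): an object-backed pageout might look roughly like
 *
 *	struct vm_page *pages[MYCLUSTERPAGES];	(MYCLUSTERPAGES is made up)
 *	struct vm_page **ppsp = pages;
 *	int npages = MYCLUSTERPAGES;
 *	int result;
 *
 *	(page queues locked, uobj locked, pg PG_BUSY and !PG_CLEAN)
 *	result = uvm_pager_put(uobj, pg, &ppsp, &npages,
 *	    PGO_ALLPAGES|PGO_PDFREECLUST, 0, 0);
 *	(page queues now unlocked; see return-state notes above for the
 *	 lock state of uobj and for who must un-busy the cluster)
 */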

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	voff_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;

	/*
	 * note that uobj is null  if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;  /* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.  the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.  if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages,
		    flags & PGO_SYNCIO);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags & PGO_SYNCIO);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 *	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 *  i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.
	 * for transient errors, drop to a cluster of 1 page ("pg")
	 * and try again.  for hard errors, don't bother retrying.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj) {
			simple_lock(&uobj->vmobjlock);
		}
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);

		/*
		 * for failed swap-backed pageouts with a "pg",
		 * we need to reset pg's swslot to either:
		 * "swblk" (for transient errors, so we can retry),
		 * or 0 (for hard errors).
		 */

		if (uobj == NULL && pg != NULL) {
			int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0;
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = nswblk;
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT,
				    nswblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
		if (result == VM_PAGER_AGAIN) {

			/*
			 * for transient failures, free all the swslots that
			 * we're not going to retry with.
			 */

			if (uobj == NULL) {
				if (pg) {
					uvm_swap_free(swblk + 1, *npages - 1);
				} else {
					uvm_swap_free(swblk, *npages);
				}
			}
			if (pg) {
				ppsp[0] = pg;
				*npages = 1;
				goto ReTry;
			}
		} else if (uobj == NULL) {

			/*
			 * for hard errors on swap-backed pageouts,
			 * mark the swslots as bad.  note that we do not
			 * free swslots that we mark bad.
			 */

			uvm_swap_markbad(swblk, *npages);
		}
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).  give up!  the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.  we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *	   PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */
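
/*
 * caller-side sketch (illustration only, mirroring how uvm_pager_put()
 * above uses this on its error path): for a non-swap-backed cluster the
 * object lock is taken before the call and is still held afterwards, e.g.
 *
 *	simple_lock(&uobj->vmobjlock);
 *	uvm_pager_dropcluster(uobj, pg, ppsp, &npages, PGO_REALLOCSWAP);
 *	(uobj->vmobjlock still held; "pg" itself is left busy for the
 *	 caller to deal with)
 */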

void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		if (ppsp[lcv] == pg)		/* skip "pg" */
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED)
			/* still holding obj lock */
			wakeup(ppsp[lcv]);

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(ppsp[lcv], VM_PROT_NONE);
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg "
				    "function");
#endif
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died "
				    "with active page");
#endif
			/* only unlock the object if it is still alive... */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?  we are
			 * currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout, update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(ppsp[lcv]);
			pmap_clear_modify(ppsp[lcv]);
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}
	}
}