/*	$NetBSD: uvm_pager.c,v 1.33 2000/09/13 15:00:25 thorpej Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_uvmhist.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#define UVM_PAGER
#include <uvm/uvm.h>

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
};

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE	(4 * 1024 * 1024)
vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */


/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, 0, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(pps, npages, aiop, flags)
	struct vm_page **pps;
	int npages;
	struct uvm_aiodesc **aiop;	/* OUT */
	int flags;
{
	vsize_t size;
	vaddr_t kva;
	struct uvm_aiodesc *aio;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, flags=0x%x)",
	    pps, npages, aiop, flags);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	if (aiop) {
		MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP,
		    (flags & UVMPAGER_MAPIN_WAITOK));
		if (aio == NULL)
			return(0);
		*aiop = aio;
	} else {
		aio = NULL;
	}

	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	    UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			if (aio)
				FREE(aio, M_TEMP);
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map",0);
		goto ReStart;
	}

	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
#ifdef DEBUG
		if ((pp->flags & PG_BUSY) == 0)
			panic("uvm_pagermapin: page not busy");
#endif
		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | prot);
	}

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}
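
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): how a pager's put path might pair uvm_pagermapin() and
 * uvm_pagermapout() around a pageout.  "pps", "npages" and the I/O
 * step itself are hypothetical and assumed to be set up by the caller;
 * the real callers live in the individual pagers.
 */
#if 0
	vaddr_t kva;

	/*
	 * pageout: the I/O only reads the pages, so UVMPAGER_MAPIN_READ
	 * is not passed.  with UVMPAGER_MAPIN_WAITOK we are willing to
	 * sleep until pager_map KVA becomes available.
	 */
	kva = uvm_pagermapin(pps, npages, NULL, UVMPAGER_MAPIN_WAITOK);
	if (kva == 0)
		return (VM_PAGER_AGAIN);	/* could not get KVA */

	/* ... start the write from [kva, kva + (npages << PAGE_SHIFT)) ... */

	uvm_pagermapout(kva, npages);		/* wakes pager_map waiters */
#endif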

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the mapping by hand (duplicating uvm_unmap()) so that we
 * can wake up anyone waiting for pager_map space.
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	vm_map_lock(pager_map);
	(void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *	PGO_ALLPAGES:	all pages in object are valid targets
 *	!PGO_ALLPAGES:	use "lo" and "hi" to limit range of cluster
 *	PGO_DOACTCLUST:	include active pages in cluster.
 *	NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *		PG_CLEANCHK is only a hint, but clearing will help reduce
 *		the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;	/* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	voff_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	voff_t lo, hi, curoff;
	int center_idx, forward;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
#ifdef DIAGNOSTIC
		printf("uvm_mk_pcluster: provided page array too small (fixed)\n");
#endif
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward = 0 ; forward <= 1 ; forward++) {

		curoff = center->offset + (forward ? PAGE_SIZE : -PAGE_SIZE);
		for ( ;(forward == 0 && curoff >= lo) ||
		       (forward && curoff < hi);
		    curoff += (forward ? 1 : -1) << PAGE_SHIFT) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL)
				break;			/* no page */
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0)
					/* don't want mapped pages at all */
					break;

				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					    == PG_CLEAN &&
					    pmap_is_modified(pclust))
						pclust->flags &= ~PG_CLEAN;

					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}
			/* is the page available for cleaning and does it need it? */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0)
				break;	/* page is already clean or is busy */

			/* yes!  enroll the page in our array */
			pclust->flags |= PG_BUSY;		/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");
			/* XXX: protect wired page?  see above comment. */
			pmap_page_protect(pclust, VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			*npages = *npages + 1;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}
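
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): the locking and page state a hypothetical caller sets up
 * before asking for a put cluster.  "uobj", "pg", "pps" and "npages"
 * are assumed to exist in that caller; on entry "npages" holds the
 * size of the "pps" array.
 */
#if 0
	struct vm_page **ppsp;

	/* the center page must be busy and write-protected by the caller */
	pg->flags |= PG_BUSY;
	UVM_PAGE_OWN(pg, "example pageout");
	pmap_page_protect(pg, VM_PROT_READ);

	/* both the object and the page queues must be locked to cluster */
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	ppsp = uvm_mk_pcluster(uobj, pps, &npages, pg, PGO_ALLPAGES, 0, 0);
	uvm_unlock_pageq();

	/*
	 * ppsp[0 .. npages-1] are now busy; the caller must un-busy them
	 * (and check wanted/released) once the pageout completes.
	 */
#endif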

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.   this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result != VM_PAGER_PEND.   in all other cases
 *		we return with uobj unlocked.   [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.   thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	voff_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;

	/*
	 * note that uobj is null  if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;	/* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.   the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.   if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages,
		    flags & PGO_SYNCIO);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags & PGO_SYNCIO);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 *	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 * i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.
	 * for transient errors, drop to a cluster of 1 page ("pg")
	 * and try again.   for hard errors, don't bother retrying.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj) {
			simple_lock(&uobj->vmobjlock);
		}
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);

		/*
		 * for failed swap-backed pageouts with a "pg",
		 * we need to reset pg's swslot to either:
		 * "swblk" (for transient errors, so we can retry),
		 * or 0 (for hard errors).
		 */

		if (uobj == NULL && pg != NULL) {
			int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0;
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = nswblk;
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT,
				    nswblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
		if (result == VM_PAGER_AGAIN) {

			/*
			 * for transient failures, free all the swslots that
			 * we're not going to retry with.
			 */

			if (uobj == NULL) {
				if (pg) {
					uvm_swap_free(swblk + 1, *npages - 1);
				} else {
					uvm_swap_free(swblk, *npages);
				}
			}
			if (pg) {
				ppsp[0] = pg;
				*npages = 1;
				goto ReTry;
			}
		} else if (uobj == NULL) {

			/*
			 * for hard errors on swap-backed pageouts,
			 * mark the swslots as bad.   note that we do not
			 * free swslots that we mark bad.
			 */

			uvm_swap_markbad(swblk, *npages);
		}
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).   give up!   the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}
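
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): a swap-backed pageout in the style of the pagedaemon.
 * "pg", "swpps", "swnpages" and "swblk" are hypothetical names for a
 * busy, dirty page, a cluster array the caller built, its size, and a
 * previously allocated swap block.
 */
#if 0
	struct vm_page **ppsp = swpps;
	int npages = swnpages;
	int result;

	/* the page queues must be locked before clustering; uobj is NULL */
	uvm_lock_pageq();
	result = uvm_pager_put(NULL, pg, &ppsp, &npages,
	    PGO_PDFREECLUST, (voff_t)swblk, 0);

	/*
	 * uvm_pager_put() returns with the page queues unlocked.  with
	 * PGO_PDFREECLUST a successful synchronous write un-busies the
	 * cluster for us; on a hard error only "pg" is left to clean up.
	 */
#endif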

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.   we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *	PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */

void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		if (ppsp[lcv] == pg)	/* skip "pg" */
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED)
			/* still holding obj lock */
			wakeup(ppsp[lcv]);

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(ppsp[lcv], VM_PROT_NONE);
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg "
				    "function");
#endif
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died "
				    "with active page");
#endif
			/* only unlock the object if it is still alive... */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?  we
			 * are currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout, update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(ppsp[lcv]);
			pmap_clear_modify(ppsp[lcv]);
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}
	}
}