/*	$NetBSD: uvm_pager.c,v 1.19 1999/05/26 06:42:57 thorpej Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_uvmhist.h"
#include "opt_pmap_new.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGER
#include <uvm/uvm.h>
/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops aobj_pager;
extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
};

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE	(4 * 1024 * 1024)
vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager_map_wanted_lock */
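
/*
 * pager_map_wanted protocol: a thread that fails to allocate KVA in
 * pager_map sets pager_map_wanted (under pager_map_wanted_lock) and
 * sleeps on pager_map; uvm_pagermapout() clears the flag and wakes
 * the sleepers once space is released.
 */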

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, FALSE, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 *
 * XXX It would be nice to know the direction of the I/O, so that we can
 * XXX map only what is necessary.
 */

vaddr_t
uvm_pagermapin(pps, npages, aiop, waitf)
	struct vm_page **pps;
	int npages;
	struct uvm_aiodesc **aiop;	/* OUT */
	int waitf;
{
	vsize_t size;
	vaddr_t kva;
	struct uvm_aiodesc *aio;
	vaddr_t cva;
	struct vm_page *pp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, waitf=%d)",
	    pps, npages, aiop, waitf);

ReStart:
	if (aiop) {
		MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP, waitf);
		if (aio == NULL)
			return(0);
		*aiop = aio;
	} else {
		aio = NULL;
	}

	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	    UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if (waitf == M_NOWAIT) {
			if (aio)
				FREE(aio, M_TEMP);
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		/*
		 * free the aio before sleeping; ReStart will allocate a
		 * fresh one (otherwise the old one would leak).
		 */
		if (aio)
			FREE(aio, M_TEMP);
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map",0);
		goto ReStart;
	}

	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
#ifdef DEBUG
		if ((pp->flags & PG_BUSY) == 0)
			panic("uvm_pagermapin: page not busy");
#endif

		/*
		 * XXX VM_PROT_DEFAULT includes VM_PROT_EXEC; is that
		 * XXX really necessary?  It could lead to unnecessary
		 * XXX instruction cache flushes.
		 */
		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    VM_PROT_DEFAULT, TRUE /* wired */,
		    VM_PROT_READ | VM_PROT_WRITE);
	}

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}
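
/*
 * Example (sketch): a pager typically brackets device I/O with a
 * pagermapin/pagermapout pair on a run of busy pages, e.g.:
 *
 *	kva = uvm_pagermapin(pps, npages, NULL, M_WAITOK);
 *	(do I/O on the contiguous buffer mapped at "kva")
 *	uvm_pagermapout(kva, npages);
 *
 * (illustrative only; async callers also pass an "aiop" to get a
 * uvm_aiodesc back for the I/O-done path)
 */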

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we duplicate what uvm_unmap() would do, adding pager_map_wanted
 * handling so that anyone waiting for pager_map space gets woken up.
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	vm_map_lock(pager_map);
	(void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *	PGO_ALLPAGES:  all pages in object are valid targets
 *	!PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *	PGO_DOACTCLUST: include active pages in cluster.
 *	  NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *	  PG_CLEANCHK is only a hint, but clearing will help reduce
 *	  the number of calls we make to the pmap layer.
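 * => the returned pointer points *into* "pps" (not necessarily at
 *    &pps[0]); callers must use it together with the updated *npages
 *    [as uvm_pager_put() does]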
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;	/* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	vaddr_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	vaddr_t lo, hi, curoff;
	int center_idx, forward;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
#ifdef DIAGNOSTIC
		printf("uvm_mk_pcluster: provided page array too small (fixed)\n");
#endif
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;
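
	/*
	 * note: the cluster is built outward from the center slot, so
	 * "ppsp" is backed up one slot for each page added on the left
	 * and *npages grows for each page added on the right.  on return,
	 * ppsp points at the first (lowest-offset) page of a contiguous,
	 * offset-ordered run within pps.
	 */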

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward = 0 ; forward <= 1 ; forward++) {

		curoff = center->offset + (forward ? PAGE_SIZE : -PAGE_SIZE);
		for ( ; (forward == 0 && curoff >= lo) ||
		    (forward && curoff < hi) ;
		    curoff += (forward ? 1 : -1) << PAGE_SHIFT) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL)
				break;			/* no page */
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0)
					/* don't want mapped pages at all */
					break;

				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					    == PG_CLEAN &&
					    pmap_is_modified(PMAP_PGARG(pclust)))
						pclust->flags &= ~PG_CLEAN;
					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}
			/* is the page available for cleaning, and does it need it? */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0)
				break;	/* page is already clean or is busy */

			/* yes!  enroll the page in our array */
			pclust->flags |= PG_BUSY;	/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");
			/* XXX: protect wired page?  see above comment. */
			pmap_page_protect(PMAP_PGARG(pclust), VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			*npages = *npages + 1;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}


/*
 * uvm_shareprot: generic share protect routine
 *
 * => caller must lock map entry's map
 * => caller must lock object pointed to by map entry
 */

void
uvm_shareprot(entry, prot)
	vm_map_entry_t entry;
	vm_prot_t prot;
{
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct vm_page *pp;
	vaddr_t start, stop;
	UVMHIST_FUNC("uvm_shareprot"); UVMHIST_CALLED(maphist);

	if (UVM_ET_ISSUBMAP(entry))
		panic("uvm_shareprot: non-object attached");

	start = entry->offset;
	stop = start + (entry->end - entry->start);

	/*
	 * traverse list of pages in object.  if page in range, pmap_prot it
	 */

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) {
		if (pp->offset >= start && pp->offset < stop)
			pmap_page_protect(PMAP_PGARG(pp), prot);
	}
	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
}

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.  this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *	if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result != VM_PAGER_PEND.  in all other cases
 *		we return with uobj unlocked.  [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.  thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */
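
/*
 * Example (sketch of a hypothetical swap-backed call, pagedaemon
 * style): the caller builds the cluster itself, holds the page queues
 * locked, and passes the starting swap block as "start":
 *
 *	struct vm_page **ppsp = pps;	(pps: caller-built cluster)
 *	result = uvm_pager_put(NULL, pg, &ppsp, &npages,
 *	    PGO_PDFREECLUST, startblk, 0);
 *
 * on return the page queues are unlocked and "result" is one of the
 * VM_PAGER_* codes described above.
 */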

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr; /* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	vaddr_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;

	/*
	 * note that uobj is null if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;	/* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.  the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.  if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages,
		    flags & PGO_SYNCIO);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags & PGO_SYNCIO);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 *	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 * i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST, 0);
			/*
			 * if (uobj): object still locked, as per
			 * return-state item #3
			 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.  if we have clustered, we drop the
	 * cluster and try again.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj)
			simple_lock(&uobj->vmobjlock);
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP,
		    swblk);
		if (pg != NULL)
			goto ReTry;
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).  give up!  the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		/* relock uobj for the caller, as per return-state item #3 */
		simple_lock(&uobj->vmobjlock);
	return(result);
}

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.  we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *	PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */

void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags, swblk)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
	int swblk;	/* valid if (uobj == NULL && PGO_REALLOCSWAP) */
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * if we need to reallocate swap space for the cluster we are dropping
	 * (true if swap-backed and PGO_REALLOCSWAP) then free the old
	 * allocation now.  save a block for "pg" if it is non-NULL.
	 *
	 * note that we will zap the object's pointer to swap in the "for" loop
	 * below...
	 */

	if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
		if (pg)
			uvm_swap_free(swblk + 1, *npages - 1);
		else
			uvm_swap_free(swblk, *npages);
	}
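
	/*
	 * note: when "pg" is non-NULL the first swap block (swblk) is
	 * kept back here; it is handed to "pg" again at the bottom of
	 * this function when the cluster is reduced to one page.
	 */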

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		if (ppsp[lcv] == pg)		/* skip "pg" */
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED)
			/* still holding obj lock */
			thread_wakeup(ppsp[lcv]);

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(PMAP_PGARG(ppsp[lcv]),
				    VM_PROT_NONE);	/* be safe */
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg "
				    "function");
#endif
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died "
				    "with active page");
#endif
			/* only unlock the object if it is still alive... */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?  we
			 * are currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout, update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(PMAP_PGARG(ppsp[lcv]));
			pmap_clear_modify(PMAP_PGARG(ppsp[lcv]));
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if swap-backed, unlock the anon/object lock we took above */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}

	}

	/*
	 * drop to a cluster of 1 page ("pg") if requested
	 */

	if (pg && (flags & PGO_PDFREECLUST) == 0) {
		/*
		 * the pageout was not a success, so we drop to a one
		 * page cluster.
		 */
		ppsp[0] = pg;
		*npages = 1;

		/*
		 * assign new swap block to new cluster, if anon backed
		 */
		if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = swblk;	/* reassign */
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT, swblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
}