/*	$NetBSD: uvm_pager.c,v 1.13.2.1 1998/11/09 06:06:39 chs Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_uvmhist.h"
#include "opt_pmap_new.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

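/*
 * note: UVM_PAGER is defined before pulling in <uvm/uvm.h> (presumably so
 * that the pager-private/inline parts of the uvm headers are exposed to
 * this file).
 */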
#define UVM_PAGER
#include <uvm/uvm.h>

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops aobj_pager;
extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;
#ifdef UBC
extern struct uvm_pagerops ubc_pager;
#endif

struct uvm_pagerops *uvmpagerops[] = {
        &aobj_pager,
        &uvm_deviceops,
        &uvm_vnodeops,
#ifdef UBC
        &ubc_pager,
#endif
};

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE	(4 * 1024 * 1024)
vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */


/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
        int lcv;

        /*
         * init pager map
         */

        pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
            PAGER_MAP_SIZE, FALSE, FALSE, NULL);
        simple_lock_init(&pager_map_wanted_lock);
        pager_map_wanted = FALSE;

        /*
         * init ASYNC I/O queue
         */

        TAILQ_INIT(&uvm.aio_done);

        /*
         * call pager init functions
         */
        for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
            lcv++) {
                if (uvmpagerops[lcv]->pgo_init)
                        uvmpagerops[lcv]->pgo_init();
        }
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */
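
/*
 * typical use (sketch only): map the pages, do the device I/O on the
 * returned KVA, then release the space with uvm_pagermapout():
 *
 *	kva = uvm_pagermapin(pps, npages, NULL, M_WAITOK);
 *	... do the I/O on [kva, kva + (npages << PAGE_SHIFT)) ...
 *	uvm_pagermapout(kva, npages);
 */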

vaddr_t
uvm_pagermapin(pps, npages, aiop, waitf)
        struct vm_page **pps;
        int npages;
        struct uvm_aiodesc **aiop;	/* OUT */
        int waitf;
{
        vsize_t size;
        vaddr_t kva;
        struct uvm_aiodesc *aio;
#if !defined(PMAP_NEW)
        vaddr_t cva;
        struct vm_page *pp;
#endif
        UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, waitf=%d)",
            pps, npages, aiop, waitf);

ReStart:
        if (aiop) {
                MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP, waitf);
                if (aio == NULL)
                        return(0);
                *aiop = aio;
        } else {
                aio = NULL;
        }

        size = npages << PAGE_SHIFT;
        kva = NULL;			/* let system choose VA */

        if (uvm_map(pager_map, &kva, size, NULL,
            UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
                if (waitf == M_NOWAIT) {
                        if (aio)
                                FREE(aio, M_TEMP);
                        UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
                        return(NULL);
                }
                simple_lock(&pager_map_wanted_lock);
                pager_map_wanted = TRUE;
                UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
                UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
                    "pager_map", 0);
                goto ReStart;
        }

#if defined(PMAP_NEW)
        /*
         * XXX: (ab)using the pmap module to store state info for us.
         * (pmap stores the PAs... we fetch them back later and convert back
         * to pages with PHYS_TO_VM_PAGE).
         */
        pmap_kenter_pgs(kva, pps, npages);

#else /* PMAP_NEW */

        /* got it */
        for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
                pp = *pps++;
#ifdef DEBUG
                if ((pp->flags & PG_BUSY) == 0)
                        panic("uvm_pagermapin: page not busy");
#endif

                pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
                    VM_PROT_DEFAULT, TRUE);
        }

#endif /* PMAP_NEW */

        UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
        return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the mapping and free the map entry, waking up anyone
 * waiting for pager_map space.
 */

void
uvm_pagermapout(kva, npages)
        vaddr_t kva;
        int npages;
{
        vsize_t size = npages << PAGE_SHIFT;
        vm_map_entry_t entries;
        UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

        /*
         * duplicate uvm_unmap, but add in pager_map_wanted handling.
         */

        vm_map_lock(pager_map);
        (void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
        simple_lock(&pager_map_wanted_lock);
        if (pager_map_wanted) {
                pager_map_wanted = FALSE;
                wakeup(pager_map);
        }
        simple_unlock(&pager_map_wanted_lock);
        vm_map_unlock(pager_map);
        if (entries)
                uvm_unmap_detach(entries, 0);

        UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *	PGO_ALLPAGES:	all pages in object are valid targets
 *	!PGO_ALLPAGES:	use "lo" and "hi" to limit range of cluster
 *	PGO_DOACTCLUST:	include active pages in cluster.
 *	NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *	PG_CLEANCHK is only a hint, but clearing will help reduce
 *	the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
        struct uvm_object *uobj;	/* IN */
        struct vm_page **pps, *center;	/* IN/OUT, IN */
        int *npages, flags;		/* IN/OUT, IN */
        vaddr_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
        struct vm_page **ppsp, *pclust;
        vaddr_t lo, hi, curoff;
        int center_idx, forward;
        UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

        /*
         * center page should already be busy and write protected.  XXX:
         * suppose page is wired?  if we lock, then a process could
         * fault/block on it.  if we don't lock, a process could write the
         * pages in the middle of an I/O.  (consider an msync()).  let's
         * lock it for now (better to delay than corrupt data?).
         */

        /*
         * get cluster boundaries, check sanity, and apply our limits as well.
         */

        uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
        if ((flags & PGO_ALLPAGES) == 0) {
                if (lo < mlo)
                        lo = mlo;
                if (hi > mhi)
                        hi = mhi;
        }
        if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
#ifdef DIAGNOSTIC
                printf("uvm_mk_pcluster: provided page array too small (fixed)\n");
#endif
                pps[0] = center;
                *npages = 1;
                return(pps);
        }

        /*
         * now determine the center and attempt to cluster around the
         * edges
         */

        center_idx = (center->offset - lo) >> PAGE_SHIFT;
        pps[center_idx] = center;	/* plug in the center page */
        ppsp = &pps[center_idx];
        *npages = 1;
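
        /*
         * note: ppsp currently points at the center page's slot in pps[].
         * as we cluster backwards below we decrement ppsp, so the final
         * cluster is contiguous in pps[] and ppsp is what we return.
         */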

        /*
         * attempt to cluster around the left [backward], and then
         * the right side [forward].
         *
         * note that for inactive pages (pages that have been deactivated)
         * there are no valid mappings and PG_CLEAN should be up to date.
         * [i.e. there is no need to query the pmap with pmap_is_modified
         * since there are no mappings].
         */

        for (forward = 0 ; forward <= 1 ; forward++) {

                curoff = center->offset + ((forward) ? PAGE_SIZE : -PAGE_SIZE);
                for ( ;(forward == 0 && curoff >= lo) ||
                    (forward && curoff < hi);
                    curoff += (forward ? 1 : -1) << PAGE_SHIFT) {

                        pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
                        if (pclust == NULL)
                                break;		/* no page */
                        /* handle active pages */
                        /* NOTE: inactive pages don't have pmap mappings */
                        if ((pclust->pqflags & PQ_INACTIVE) == 0) {
                                if ((flags & PGO_DOACTCLUST) == 0)
                                        /* dont want mapped pages at all */
                                        break;

                                /* make sure "clean" bit is sync'd */
                                if ((pclust->flags & PG_CLEANCHK) == 0) {
                                        if ((pclust->flags & (PG_CLEAN|PG_BUSY))
                                            == PG_CLEAN &&
                                            pmap_is_modified(PMAP_PGARG(pclust)))
                                                pclust->flags &= ~PG_CLEAN;
                                        /* now checked */
                                        pclust->flags |= PG_CLEANCHK;
                                }
                        }
                        /* is page available for cleaning and does it need it */
                        if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0)
                                break;	/* page is already clean or is busy */

                        /* yes!  enroll the page in our array */
                        pclust->flags |= PG_BUSY;		/* busy! */
                        UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");
                        /* XXX: protect wired page?  see above comment. */
                        pmap_page_protect(PMAP_PGARG(pclust), VM_PROT_READ);
                        if (!forward) {
                                ppsp--;			/* back up one page */
                                *ppsp = pclust;
                        } else {
                                /* move forward one page */
                                ppsp[*npages] = pclust;
                        }
                        *npages = *npages + 1;
                }
        }

        /*
         * done!  return the cluster array to the caller!!!
         */

        UVMHIST_LOG(maphist, "<- done",0,0,0,0);
        return(ppsp);
}


/*
 * uvm_shareprot: generic share protect routine
 *
 * => caller must lock map entry's map
 * => caller must lock object pointed to by map entry
 */

void
uvm_shareprot(entry, prot)
        vm_map_entry_t entry;
        vm_prot_t prot;
{
        struct uvm_object *uobj = entry->object.uvm_obj;
        struct vm_page *pp;
        vaddr_t start, stop;
        UVMHIST_FUNC("uvm_shareprot"); UVMHIST_CALLED(maphist);

        if (UVM_ET_ISSUBMAP(entry))
                panic("uvm_shareprot: non-object attached");

        start = entry->offset;
        stop = start + (entry->end - entry->start);
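        /* note: start/stop are offsets into the object, not virtual addrs */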

        /*
         * traverse list of pages in object.  if page in range, pmap_prot it
         */

        for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) {
                if (pp->offset >= start && pp->offset < stop)
                        pmap_page_protect(PMAP_PGARG(pp), prot);
        }
        UVMHIST_LOG(maphist, "<- done",0,0,0,0);
}

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.   this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result != VM_PAGER_PEND.   in all other cases
 *		we return with uobj unlocked.   [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.   thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */
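
/*
 * e.g. (sketch only) an object-backed pageout might look like:
 *
 *	npages = <size of the pps array>;
 *	ppsp = pps;
 *	result = uvm_pager_put(uobj, pg, &ppsp, &npages,
 *	    PGO_DOACTCLUST|PGO_PDFREECLUST, start, stop);
 */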

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
        struct uvm_object *uobj;	/* IN */
        struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
        int *npages;			/* IN/OUT */
        int flags;			/* IN */
        vaddr_t start, stop;		/* IN, IN */
{
        int result;
        daddr_t swblk;
        struct vm_page **ppsp = *ppsp_ptr;

        /*
         * note that uobj is null if we are doing a swap-backed pageout.
         * note that uobj is !null if we are doing normal object pageout.
         * note that the page queues must be locked to cluster.
         */

        if (uobj) {	/* if !swap-backed */

                /*
                 * attempt to build a cluster for pageout using its
                 * make-put-cluster function (if it has one).
                 */

                if (uobj->pgops->pgo_mk_pcluster) {
                        ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
                            npages, pg, flags, start, stop);
                        *ppsp_ptr = ppsp;  /* update caller's pointer */
                } else {
                        ppsp[0] = pg;
                        *npages = 1;
                }

                swblk = 0;		/* XXX: keep gcc happy */

        } else {

                /*
                 * for swap-backed pageout, the caller (the pagedaemon) has
                 * already built the cluster for us.   the starting swap
                 * block we are writing to has been passed in as "start."
                 * "pg" could be NULL if there is no page we are especially
                 * interested in (in which case the whole cluster gets dropped
                 * in the event of an error or a sync "done").
                 */
                swblk = (daddr_t) start;
                /* ppsp and npages should be ok */
        }

        /* now that we've clustered we can unlock the page queues */
        uvm_unlock_pageq();

        /*
         * now attempt the I/O.   if we have a failure and we are
         * clustered, we will drop the cluster and try again.
         */

ReTry:
        if (uobj) {
                /* object is locked */
                simple_lock_assert(&uobj->vmobjlock, 1);
                result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
                /* object is now unlocked */
                simple_lock_assert(&uobj->vmobjlock, 0);
        } else {
                /* nothing locked */
                /* XXX should we pass more than just PGO_SYNCIO here too? */
                result = uvm_swap_put(swblk, ppsp, *npages, flags & PGO_SYNCIO);
                /* nothing locked */
        }

        /*
         * we have attempted the I/O.
         *
         * if the I/O was a success then:
         *	if !PGO_PDFREECLUST, we return the cluster to the
         *		caller (who must un-busy all pages)
         *	else we un-busy cluster pages for the pagedaemon
         *
         * if I/O is pending (async i/o) then we return the pending code.
         * [in this case the async i/o done function must clean up when
         * i/o is done...]
         */

        if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
                if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
                        /*
                         * drop cluster and relock object (only if I/O is
                         * not pending)
                         */
                        if (uobj)
                                /* required for dropcluster */
                                simple_lock(&uobj->vmobjlock);
                        if (*npages > 1 || pg == NULL)
                                uvm_pager_dropcluster(uobj, pg, ppsp, npages,
                                    PGO_PDFREECLUST, 0);
                        /* if (uobj): object still locked, as per
                         * return-state item #3 */
                }
                return (result);
        }

        /*
         * a pager error occurred.  if we have clustered, we drop the
         * cluster and try again.
         */

        if (*npages > 1 || pg == NULL) {
                if (uobj)
                        simple_lock(&uobj->vmobjlock);
                uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP,
                    swblk);
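                /*
                 * if we still have a page of interest we retry the I/O
                 * with just that one page; if "pg" is NULL there is
                 * nothing left to retry.
                 */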
                if (pg != NULL)
                        goto ReTry;
        }

        /*
         * a pager error occurred (even after dropping the cluster, if there
         * was one).  give up!  the caller only has one page ("pg")
         * to worry about.
         */

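        /*
         * relock the object here to satisfy return-state item #3 above
         * (PGO_PDFREECLUST set and result != VM_PAGER_PEND).
         */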
        if (uobj && (flags & PGO_PDFREECLUST) != 0)
                simple_lock(&uobj->vmobjlock);
        return(result);
}

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.   we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *	PGO_REALLOCSWAP: drop previously allocated swap slots for
 *	clustered swap-backed pages (except for "pg" if !NULL)
 *	"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *	[only meaningful if swap-backed (uobj == NULL)]
 */

void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags, swblk)
        struct uvm_object *uobj;	/* IN */
        struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
        int *npages;			/* IN/OUT */
        int flags;
        int swblk;	/* valid if (uobj == NULL && PGO_REALLOCSWAP) */
{
        int lcv;
        boolean_t obj_is_alive;
        struct uvm_object *saved_uobj;

        /*
         * if we need to reallocate swap space for the cluster we are dropping
         * (true if swap-backed and PGO_REALLOCSWAP) then free the old
         * allocation now.   save a block for "pg" if it is non-NULL.
         *
         * note that we will zap the object's pointer to swap in the "for" loop
         * below...
         */

        if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
                if (pg)
                        uvm_swap_free(swblk + 1, *npages - 1);
                else
                        uvm_swap_free(swblk, *npages);
        }

        /*
         * drop all pages but "pg"
         */

        for (lcv = 0 ; lcv < *npages ; lcv++) {

                if (ppsp[lcv] == pg)	/* skip "pg" */
                        continue;

                /*
                 * if swap-backed, gain lock on object that owns page.  note
                 * that PQ_ANON bit can't change as long as we are holding
                 * the PG_BUSY bit (so there is no need to lock the page
                 * queues to test it).
                 *
                 * once we have the lock, dispose of the pointer to swap, if
                 * requested
                 */
                if (!uobj) {
                        if (ppsp[lcv]->pqflags & PQ_ANON) {
                                simple_lock(&ppsp[lcv]->uanon->an_lock);
                                if (flags & PGO_REALLOCSWAP)
                                        /* zap swap block */
                                        ppsp[lcv]->uanon->an_swslot = 0;
                        } else {
                                simple_lock(&ppsp[lcv]->uobject->vmobjlock);
                                if (flags & PGO_REALLOCSWAP)
                                        uao_set_swslot(ppsp[lcv]->uobject,
                                            ppsp[lcv]->offset >> PAGE_SHIFT, 0);
                        }
                }

                /* did someone want the page while we had it busy-locked? */
                if (ppsp[lcv]->flags & PG_WANTED)
                        /* still holding obj lock */
                        thread_wakeup(ppsp[lcv]);

                /* if page was released, release it.  otherwise un-busy it */
                if (ppsp[lcv]->flags & PG_RELEASED) {

                        if (ppsp[lcv]->pqflags & PQ_ANON) {
                                /* so that anfree will free */
                                ppsp[lcv]->flags &= ~(PG_BUSY);
                                UVM_PAGE_OWN(ppsp[lcv], NULL);

                                pmap_page_protect(PMAP_PGARG(ppsp[lcv]),
                                    VM_PROT_NONE);	/* be safe */
                                simple_unlock(&ppsp[lcv]->uanon->an_lock);
                                /* kills anon and frees pg */
                                uvm_anfree(ppsp[lcv]->uanon);

                                continue;
                        }

                        /*
                         * pgo_releasepg will dump the page for us
                         */

#ifdef DIAGNOSTIC
                        if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
                                panic("uvm_pager_dropcluster: no releasepg "
                                    "function");
#endif
                        saved_uobj = ppsp[lcv]->uobject;
                        obj_is_alive =
                            saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
                        /* for normal objects, "pg" is still PG_BUSY by us,
                         * so obj can't die */
                        if (uobj && !obj_is_alive)
                                panic("uvm_pager_dropcluster: object died "
                                    "with active page");
#endif
                        /* only unlock the object if it is still alive... */
                        if (obj_is_alive && saved_uobj != uobj)
                                simple_unlock(&saved_uobj->vmobjlock);

                        /*
                         * XXXCDC: suppose uobj died in the pgo_releasepg?
                         * how do we pass that info up to our caller?
                         * we are currently ignoring it...
                         */

                        continue;		/* next page */

                } else {
                        ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED);
                        UVM_PAGE_OWN(ppsp[lcv], NULL);
                }

                /*
                 * if we are operating on behalf of the pagedaemon and we
                 * had a successful pageout update the page!
                 */
                if (flags & PGO_PDFREECLUST) {
                        /* XXX: with PMAP_NEW ref should already be clear,
                         * but don't trust! */
                        pmap_clear_reference(PMAP_PGARG(ppsp[lcv]));
                        pmap_clear_modify(PMAP_PGARG(ppsp[lcv]));
                        ppsp[lcv]->flags |= PG_CLEAN;
                }

                /* if anonymous cluster, unlock object and move on */
                if (!uobj) {
                        if (ppsp[lcv]->pqflags & PQ_ANON)
                                simple_unlock(&ppsp[lcv]->uanon->an_lock);
                        else
                                simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
                }

        }

        /*
         * drop to a cluster of 1 page ("pg") if requested
         */

        if (pg && (flags & PGO_PDFREECLUST) == 0) {
                /*
                 * if we are not a successful pageout, we make a 1 page cluster.
                 */
                ppsp[0] = pg;
                *npages = 1;

                /*
                 * assign new swap block to new cluster, if anon backed
                 */
                if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
                        if (pg->pqflags & PQ_ANON) {
                                simple_lock(&pg->uanon->an_lock);
                                pg->uanon->an_swslot = swblk;	/* reassign */
                                simple_unlock(&pg->uanon->an_lock);
                        } else {
                                simple_lock(&pg->uobject->vmobjlock);
                                uao_set_swslot(pg->uobject,
                                    pg->offset >> PAGE_SHIFT, swblk);
                                simple_unlock(&pg->uobject->vmobjlock);
                        }
                }
        }
}