/*	$NetBSD: uvm_pager.c,v 1.11 1998/10/11 23:18:20 chuck Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *	>>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_uvmhist.h"
#include "opt_pmap_new.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGER
#include <uvm/uvm.h>

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops aobj_pager;
extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
};

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE	(4 * 1024 * 1024)
vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */


/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, FALSE, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(pps, npages, aiop, waitf)
	struct vm_page **pps;
	int npages;
	struct uvm_aiodesc **aiop;	/* OUT */
	int waitf;
{
	vsize_t size;
	vaddr_t kva;
	struct uvm_aiodesc *aio;
#if !defined(PMAP_NEW)
	vaddr_t cva;
	struct vm_page *pp;
#endif
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, waitf=%d)",
	    pps, npages, aiop, waitf);

ReStart:
	if (aiop) {
		MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP, waitf);
		if (aio == NULL)
			return(0);
		*aiop = aio;
	} else {
		aio = NULL;
	}
	size = npages * PAGE_SIZE;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	    UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if (waitf == M_NOWAIT) {
			if (aio)
				FREE(aio, M_TEMP);
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map",0);
		goto ReStart;
	}

#if defined(PMAP_NEW)
	/*
	 * XXX: (ab)using the pmap module to store state info for us.
	 * (pmap stores the PAs... we fetch them back later and convert back
	 * to pages with PHYS_TO_VM_PAGE).
	 */
	pmap_kenter_pgs(kva, pps, npages);

#else /* PMAP_NEW */

	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
#ifdef DEBUG
		if ((pp->flags & PG_BUSY) == 0)
			panic("uvm_pagermapin: page not busy");
#endif

		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    VM_PROT_DEFAULT, TRUE);
	}

#endif /* PMAP_NEW */

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our mappings by hand and then remove the map entry
 * (waking up anyone waiting for pager_map space).
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages * PAGE_SIZE;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	vm_map_lock(pager_map);
	(void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
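
/*
 * example usage of the two routines above (a sketch only, not code
 * from this file: "pps", "npages", and pager_do_io() stand in for a
 * real pager's page array and transfer routine):
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pps, npages, NULL, M_NOWAIT);
 *	if (kva == 0)
 *		return(VM_PAGER_AGAIN);    <- no KVA free, M_NOWAIT given
 *	pager_do_io(kva, npages * PAGE_SIZE);
 *	uvm_pagermapout(kva, npages);
 *
 * with M_WAITOK the mapin call instead sleeps on pager_map until space
 * frees up, so the zero-return check is only needed for M_NOWAIT.
 */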

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *	PGO_ALLPAGES:  all pages in object are valid targets
 *	!PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *	PGO_DOACTCLUST: include active pages in cluster.
 *	NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *		PG_CLEANCHK is only a hint, but clearing will help reduce
 *		the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;	/* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	vaddr_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	vaddr_t lo, hi, curoff;
	int center_idx, forward;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) / PAGE_SIZE > *npages) {	/* pps too small, bail out! */
#ifdef DIAGNOSTIC
		printf("uvm_mk_pcluster: provided page array too small (fixed)\n");
#endif
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) / PAGE_SIZE;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward = 0 ; forward <= 1 ; forward++) {

		curoff = center->offset + ((forward) ? PAGE_SIZE : -PAGE_SIZE);
		for ( ; (forward == 0 && curoff >= lo) ||
		    (forward && curoff < hi) ;
		    curoff += ((forward) ? PAGE_SIZE : -PAGE_SIZE)) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL)
				break;			/* no page */
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0)
					/* dont want mapped pages at all */
					break;

				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					    == PG_CLEAN &&
					    pmap_is_modified(PMAP_PGARG(pclust)))
						pclust->flags &= ~PG_CLEAN;
					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}
			/* is page available for cleaning and does it need it */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0)
				break;	/* page is already clean or is busy */

			/* yes!  enroll the page in our array */
			pclust->flags |= PG_BUSY;		/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");
			/* XXX: protect wired page?  see above comment. */
			pmap_page_protect(PMAP_PGARG(pclust), VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			*npages = *npages + 1;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}
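
/*
 * example of the caller-side protocol for uvm_mk_pcluster (a sketch;
 * "uobj", "pg", and the "pages" array belong to the hypothetical
 * caller, and the cluster size of 16 is arbitrary):
 *
 *	simple_lock(&uobj->vmobjlock);	    <- lock object
 *	uvm_lock_pageq();		    <- lock page queues
 *	pg->flags |= PG_BUSY;		    <- busy the center page
 *	pmap_page_protect(PMAP_PGARG(pg), VM_PROT_READ);
 *	npages = 16;
 *	ppsp = uvm_mk_pcluster(uobj, pages, &npages, pg, PGO_ALLPAGES,
 *	    0, 0);
 *
 * on return every page in ppsp[0 .. npages-1] is PG_BUSY for us; once
 * the pageout finishes the caller must un-busy them all (checking
 * wanted/released status if it dropped the object lock).
 */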


/*
 * uvm_shareprot: generic share protect routine
 *
 * => caller must lock map entry's map
 * => caller must lock object pointed to by map entry
 */

void
uvm_shareprot(entry, prot)
	vm_map_entry_t entry;
	vm_prot_t prot;
{
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct vm_page *pp;
	vaddr_t start, stop;
	UVMHIST_FUNC("uvm_shareprot"); UVMHIST_CALLED(maphist);

	if (UVM_ET_ISSUBMAP(entry))
		panic("uvm_shareprot: non-object attached");

	start = entry->offset;
	stop = start + (entry->end - entry->start);

	/*
	 * traverse list of pages in object.  if page in range, pmap_prot it
	 */

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) {
		if (pp->offset >= start && pp->offset < stop)
			pmap_page_protect(PMAP_PGARG(pp), prot);
	}
	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
}

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *    backing it.  this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN.
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *    for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *    1. we return the VM_PAGER status code of the pageout
 *    2. we return with the page queues unlocked
 *    3. if (uobj != NULL) [!swap_backed] we return with
 *	 uobj locked _only_ if PGO_PDFREECLUST is set
 *	 AND result != VM_PAGER_PEND.  in all other cases
 *	 we return with uobj unlocked.  [this is a hack
 *	 that allows the pagedaemon to save one lock/unlock
 *	 pair in the !swap_backed case since we have to
 *	 lock the uobj to drop the cluster anyway]
 *    4. on errors we always drop the cluster.  thus, if we return
 *	 !PEND, !OK, then the caller only has to worry about
 *	 un-busying the main page (not the cluster pages).
 *    5. on success, if !PGO_PDFREECLUST, we return the cluster
 *	 with all pages busy (caller must un-busy and check
 *	 wanted/released flags).
 */

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	vaddr_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;

	/*
	 * note that uobj is null if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;	/* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.  the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.  if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages,
		    flags & PGO_SYNCIO);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags & PGO_SYNCIO);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 *	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 * i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST, 0);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.  if we have clustered, we drop the
	 * cluster and try again.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj)
			simple_lock(&uobj->vmobjlock);
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP,
		    swblk);
		if (pg != NULL)
			goto ReTry;
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).  give up!  the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}
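
/*
 * example of a pagedaemon-style call of uvm_pager_put for an
 * object-backed page (a sketch; the locking setup and the "pages"
 * array are the hypothetical caller's):
 *
 *	npages = 16;		    <- room in caller's "pages" array
 *	ppsp = pages;
 *	result = uvm_pager_put(uobj, pg, &ppsp, &npages,
 *	    PGO_ALLPAGES|PGO_PDFREECLUST, 0, 0);
 *
 * on return the page queues are unlocked.  per return-state item #3,
 * if result == VM_PAGER_OK the cluster was dropped for us and uobj is
 * still locked; if result == VM_PAGER_PEND the async done function
 * will clean up; on any error only "pg" remains for us to un-busy.
 */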

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.  we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *	PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */

void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags, swblk)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
	int swblk;	/* valid if (uobj == NULL && PGO_REALLOCSWAP) */
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * if we need to reallocate swap space for the cluster we are dropping
	 * (true if swap-backed and PGO_REALLOCSWAP) then free the old
	 * allocation now.  save a block for "pg" if it is non-NULL.
	 *
	 * note that we will zap the object's pointer to swap in the "for" loop
	 * below...
	 */

	if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
		if (pg)
			uvm_swap_free(swblk + 1, *npages - 1);
		else
			uvm_swap_free(swblk, *npages);
	}

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		if (ppsp[lcv] == pg)		/* skip "pg" */
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset / PAGE_SIZE, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED)
			/* still holding obj lock */
			thread_wakeup(ppsp[lcv]);

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(PMAP_PGARG(ppsp[lcv]),
				    VM_PROT_NONE);	/* be safe */
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg "
				    "function");
#endif
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died "
				    "with active page");
#endif
			/* only unlock the object if it is still alive... */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?  we
			 * are currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout, update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			/* XXX: with PMAP_NEW ref should already be clear,
			 * but don't trust! */
			pmap_clear_reference(PMAP_PGARG(ppsp[lcv]));
			pmap_clear_modify(PMAP_PGARG(ppsp[lcv]));
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}

	}

	/*
	 * drop to a cluster of 1 page ("pg") if requested
	 */

	if (pg && (flags & PGO_PDFREECLUST) == 0) {
		/*
		 * if we are not un-busying pages for a successful pagedaemon
		 * pageout, we make a 1 page cluster.
		 */
		ppsp[0] = pg;
		*npages = 1;

		/*
		 * assign new swap block to new cluster, if anon backed
		 */
		if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = swblk;	/* reassign */
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
				    pg->offset / PAGE_SIZE, swblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
}
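
/*
 * example error path using uvm_pager_dropcluster directly (a sketch
 * mirroring its use in uvm_pager_put above; "uobj", "pg", "ppsp", and
 * "npages" are the hypothetical caller's cluster state):
 *
 *	simple_lock(&uobj->vmobjlock);	  <- dropcluster wants obj locked
 *	uvm_pager_dropcluster(uobj, pg, ppsp, &npages, 0, 0);
 *
 * uobj comes back still locked and the cluster pages have been
 * un-busied (or released); only "pg" itself is left for the caller
 * to deal with.
 */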
765