/*	$NetBSD: uvm_pager.c,v 1.4 1998/02/08 06:15:59 thorpej Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *	   >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <sys/syscallargs.h>

#define UVM_PAGER
#include <uvm/uvm.h>

UVMHIST_DECL(maphist);

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;

struct uvm_pagerops *uvmpagerops[] = {
	&uvm_deviceops,
	&uvm_vnodeops,
};

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE	(4 * 1024 * 1024)
vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */


/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, FALSE, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ;
	    lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *) ; lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}
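
/*
 * Illustrative sketch, not part of the original file: what a pager hooks
 * into the dispatch loop above.  "example_pagerops" and "example_init" are
 * made-up names; only the pgo_init slot (the first member of struct
 * uvm_pagerops) is shown, and the struct would also need an entry in
 * uvmpagerops[] to be called at boot.
 */
#if 0
static void example_init __P((void));

struct uvm_pagerops example_pagerops = {
	example_init,	/* pgo_init: called once by uvm_pager_init() */
	/* ... remaining pgo_* operations ... */
};

static void
example_init()
{
	/* one-time setup: locks, free lists, etc. */
}
#endif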

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

vm_offset_t
uvm_pagermapin(pps, npages, aiop, waitf)
	struct vm_page **pps;
	int npages;
	struct uvm_aiodesc **aiop;	/* OUT */
	int waitf;
{
	vm_size_t size;
	vm_offset_t kva;
	struct uvm_aiodesc *aio;
#if !defined(PMAP_NEW)
	vm_offset_t cva;
	struct vm_page *pp;
#endif
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, waitf=%d)",
	    pps, npages, aiop, waitf);

ReStart:
	if (aiop) {
		MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP, waitf);
		if (aio == NULL)
			return(0);
		*aiop = aio;
	} else {
		aio = NULL;
	}

	size = npages * PAGE_SIZE;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	    UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if (waitf == M_NOWAIT) {
			if (aio)
				FREE(aio, M_TEMP);
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map", 0);
		goto ReStart;
	}

#if defined(PMAP_NEW)
	/*
	 * XXX: (ab)using the pmap module to store state info for us.
	 * (pmap stores the PAs... we fetch them back later and convert back
	 * to pages with PHYS_TO_VM_PAGE).
	 */
	pmap_kenter_pgs(kva, pps, npages);

#else /* PMAP_NEW */

	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
#ifdef DEBUG
		if ((pp->flags & PG_BUSY) == 0)
			panic("uvm_pagermapin: page not busy");
#endif

		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    VM_PROT_DEFAULT, TRUE);
	}

#endif /* PMAP_NEW */

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our mappings by hand and then remove the map entry (waking
 * up anyone waiting for space).
 */

void
uvm_pagermapout(kva, npages)
	vm_offset_t kva;
	int npages;
{
	vm_size_t size = npages * PAGE_SIZE;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	vm_map_lock(pager_map);
	(void) uvm_unmap_remove(pager_map, kva, kva + size, 0, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
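
/*
 * Illustrative sketch, not part of the original file: how a pager's I/O
 * path would typically bracket a synchronous device transfer with the two
 * functions above.  "example_flush" is a made-up name and the I/O itself
 * is elided.
 */
#if 0
static int
example_flush(pps, npages)
	struct vm_page **pps;
	int npages;
{
	vm_offset_t kva;

	/* reserve pager_map space and map the busy pages in (no aio desc) */
	kva = uvm_pagermapin(pps, npages, NULL, M_WAITOK);
	if (kva == 0)
		return(VM_PAGER_AGAIN);

	/* ... perform the I/O on [kva, kva + npages * PAGE_SIZE) ... */

	/* unmap, free the KVA, and wake anyone sleeping on pager_map */
	uvm_pagermapout(kva, npages);
	return(VM_PAGER_OK);
}
#endif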

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *	PGO_ALLPAGES:	all pages in object are valid targets
 *	!PGO_ALLPAGES:	use "lo" and "hi" to limit range of cluster
 *	PGO_DOACTCLUST:	include active pages in cluster.
 *	NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *	PG_CLEANCHK is only a hint, but clearing will help reduce
 *	the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;	/* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	vm_offset_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	vm_offset_t lo, hi, curoff;
	int center_idx, forward;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) / PAGE_SIZE > *npages) {	/* pps too small, bail out! */
#ifdef DIAGNOSTIC
		printf("uvm_mk_pcluster: provided page array too small (fixed)\n");
#endif
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) / PAGE_SIZE;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward = 0 ; forward <= 1 ; forward++) {

		curoff = center->offset + ((forward) ? PAGE_SIZE : -PAGE_SIZE);
		for ( ; (forward == 0 && curoff >= lo) ||
		    (forward && curoff < hi) ;
		    curoff += ((forward) ? PAGE_SIZE : -PAGE_SIZE)) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL)
				break;		/* no page */
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0)
					/* don't want mapped pages at all */
					break;
				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					    == PG_CLEAN &&
					    pmap_is_modified(PMAP_PGARG(pclust)))
						pclust->flags &= ~PG_CLEAN;
					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}
			/* is page available for cleaning and does it need it? */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0)
				break;	/* page is already clean or is busy */
			/* yes!  enroll the page in our array */
			pclust->flags |= PG_BUSY;	/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");
			/* XXX: protect wired page?  see above comment. */
			pmap_page_protect(PMAP_PGARG(pclust), VM_PROT_READ);
			if (!forward) {
				ppsp--;	/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			*npages = *npages + 1;
		}

	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}
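
/*
 * Worked example of the clustering arithmetic above (illustrative only):
 * with 4k pages, lo = 0x0, hi = 0x8000 and center->offset = 0x4000,
 * center_idx is 4 and the center page is planted at pps[4].  backward
 * clustering grows the cluster down toward pps[0] by decrementing ppsp;
 * forward clustering appends at ppsp[*npages].  the returned pointer is
 * therefore the first page of the cluster, which need not be &pps[0].
 */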


/*
 * uvm_shareprot: generic share protect routine
 *
 * => caller must lock map entry's map
 * => caller must lock object pointed to by map entry
 */

void
uvm_shareprot(entry, prot)
	vm_map_entry_t entry;
	vm_prot_t prot;
{
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct vm_page *pp;
	vm_offset_t start, stop;
	UVMHIST_FUNC("uvm_shareprot"); UVMHIST_CALLED(maphist);

	if (UVM_ET_ISMAP(entry))
		panic("uvm_shareprot: non-object attached");

	start = entry->offset;
	stop = start + (entry->end - entry->start);

	/*
	 * traverse list of pages in object.  if page in range, pmap_prot it
	 */

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) {
		if (pp->offset >= start && pp->offset < stop)
			pmap_page_protect(PMAP_PGARG(pp), prot);
	}
	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
}

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *    backing it.  this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
 * => for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *    for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *	   uobj locked _only_ if PGO_PDFREECLUST is set
 *	   AND result != VM_PAGER_PEND.  in all other cases
 *	   we return with uobj unlocked.  [this is a hack
 *	   that allows the pagedaemon to save one lock/unlock
 *	   pair in the !swap_backed case since we have to
 *	   lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.  thus, if we return
 *	   !PEND, !OK, then the caller only has to worry about
 *	   un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *	   with all pages busy (caller must un-busy and check
 *	   wanted/released flags).
 */

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr; /* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	vm_offset_t start, stop;	/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;

	/*
	 * note that uobj is null if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;  /* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.  the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets
		 * dropped in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.  if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages,
		    flags & PGO_SYNCIO);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags & PGO_SYNCIO);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 *	if !PGO_PDFREECLUST, we return the cluster to the
	 *	caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 * i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST, 0);
			/*
			 * if (uobj): object still locked, as per
			 * return-state item #3
			 */
		}
		return(result);
	}

	/*
	 * a pager error occurred.  if we have clustered, we drop the
	 * cluster and try again.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj)
			simple_lock(&uobj->vmobjlock);
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP,
		    swblk);
		if (pg != NULL)
			goto ReTry;
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).  give up!  the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}
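
/*
 * Illustrative caller sketch, not part of the original file: how the
 * return-state rules above look from a pagedaemon-style caller in the
 * non-swap-backed, PGO_PDFREECLUST case.  variable setup is elided.
 */
#if 0
	uvm_lock_pageq();
	/* pg is PG_BUSY and !PG_CLEAN; uobj is locked */
	result = uvm_pager_put(uobj, pg, &ppsp, &npages,
	    PGO_ALLPAGES|PGO_PDFREECLUST, 0, 0);
	/* page queues are now unlocked (return-state item #2) */
	if (result != VM_PAGER_PEND) {
		/*
		 * return-state items #3 and #4: uobj is still locked and
		 * only "pg" is still busy, so un-busy it and unlock here.
		 */
		simple_unlock(&uobj->vmobjlock);
	}
#endif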

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *    locked by the caller.  we return with this object still
 *    locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *    PGO_REALLOCSWAP: drop previously allocated swap slots for
 *	clustered swap-backed pages (except for "pg" if !NULL)
 *	"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *	[only meaningful if swap-backed (uobj == NULL)]
 */


void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags, swblk)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
	int swblk;	/* valid if (uobj == NULL && PGO_REALLOCSWAP) */
{
	int lcv;
	boolean_t obj_is_alive;

	/*
	 * if we need to reallocate swap space for the cluster we are
	 * dropping (true if swap-backed and PGO_REALLOCSWAP) then free
	 * the old allocation now.  save a block for "pg" if it is non-NULL.
	 *
	 * note that we will zap the object's pointer to swap in the "for"
	 * loop below...
	 */

	if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
		if (pg)
			uvm_swap_free(swblk + 1, *npages - 1);
		else
			uvm_swap_free(swblk, *npages);
	}
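
	/*
	 * e.g. (illustrative): for a 4-page cluster written to swap blocks
	 * [swblk .. swblk+3], with "pg" occupying the first slot as assumed
	 * above, we free blocks [swblk+1 .. swblk+3] and keep swblk itself
	 * so it can be reassigned to the one-page cluster rebuilt for "pg"
	 * at the bottom of this function.
	 */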

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		if (ppsp[lcv] == pg)	/* skip "pg" */
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset / PAGE_SIZE, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED)
			thread_wakeup(ppsp[lcv]);  /* still holding obj lock */

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);
				/* be safe */
				pmap_page_protect(PMAP_PGARG(ppsp[lcv]),
				    VM_PROT_NONE);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);
				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg function");
#endif
			obj_is_alive =
			    ppsp[lcv]->uobject->pgops->pgo_releasepg(ppsp[lcv],
			    NULL);

#ifdef DIAGNOSTIC
			/*
			 * for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die
			 */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died with active page");
#endif
			if (!obj_is_alive)
				continue;

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			/*
			 * XXX: with PMAP_NEW ref should already be clear,
			 * but don't trust!
			 */
			pmap_clear_reference(PMAP_PGARG(ppsp[lcv]));
			pmap_clear_modify(PMAP_PGARG(ppsp[lcv]));
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}

	}

	/*
	 * drop to a cluster of 1 page ("pg") if requested
	 */

	if (pg && (flags & PGO_PDFREECLUST) == 0) {
		/*
		 * if we are not a successful pageout, we make a 1 page
		 * cluster.
		 */
		ppsp[0] = pg;
		*npages = 1;

		/*
		 * assign new swap block to new cluster, if anon backed
		 */
		if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = swblk;	/* reassign */
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
				    pg->offset / PAGE_SIZE, swblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
}