/*	$NetBSD: uvm_pager.c,v 1.16.4.6 1999/08/02 23:37:04 thorpej Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_pmap_new.h"
#include "opt_uvmhist.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGER
#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops aobj_pager;
extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;
extern struct uvm_pagerops ubc_pager;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE	(4 * 1024 * 1024)
vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */


/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, 0, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 *
 * XXX It would be nice to know the direction of the I/O, so that we can
 * XXX map only what is necessary.
 */

vaddr_t
uvm_pagermapin(pps, npages, waitf)
	struct vm_page **pps;
	int npages;
	int waitf;
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(pps=0x%x, npages=%d, waitf=%d)",
	    pps, npages, waitf, 0);

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;	/* let system choose VA; vaddr_t is integral, so 0, not NULL */

	if (uvm_map(pager_map, &kva, size, NULL,
	    UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if (waitf == M_NOWAIT) {
			UVMHIST_LOG(maphist, "<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map", 0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map", 0);
		goto ReStart;
	}

	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
#ifdef DEBUG
		if ((pp->flags & PG_BUSY) == 0)
			panic("uvm_pagermapin: page not busy");
#endif

		/*
		 * XXX VM_PROT_DEFAULT includes VM_PROT_EXEC; is that
		 * XXX really necessary?  It could lead to unnecessary
		 * XXX instruction cache flushes.
		 */
		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    VM_PROT_DEFAULT, TRUE,
		    VM_PROT_READ | VM_PROT_WRITE);
	}

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva, 0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the pmap-level mappings by hand and then remove the map
 * entry (waking up anyone waiting for pager_map space).
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages, 0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	vm_map_lock(pager_map);
	(void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	pmap_remove(pmap_kernel(), kva, kva + size);
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}
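
/*
 * Example (sketch, not from the original source): a pager that needs a
 * kernel mapping for its I/O brackets the transfer with the two calls
 * above.  "pps"/"npages" name a cluster of busy pages; the waitf arg
 * (M_WAITOK vs. M_NOWAIT) selects whether we may sleep for pager_map
 * space:
 *
 *	vaddr_t kva;
 *
 *	if ((kva = uvm_pagermapin(pps, npages, M_NOWAIT)) == 0)
 *		return (VM_PAGER_AGAIN);   ** no KVA free, retry later **
 *	** ... do device I/O on [kva, kva + (npages << PAGE_SHIFT)) ... **
 *	uvm_pagermapout(kva, npages);
 */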

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *	PGO_ALLPAGES:	all pages in object are valid targets
 *	!PGO_ALLPAGES:	use "lo" and "hi" to limit range of cluster
 *	PGO_DOACTCLUST:	include active pages in cluster.
 *	NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *	PG_CLEANCHK is only a hint, but clearing will help reduce
 *	the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;	/* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	vaddr_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	vaddr_t lo, hi, curoff;
	int center_idx, forward, incr;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
#ifdef DIAGNOSTIC
		printf("uvm_mk_pcluster uobj %p npages %d lo 0x%lx hi 0x%lx flags 0x%x\n",
		    uobj, *npages, lo, hi, flags);
#endif
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward = 0 ; forward <= 1 ; forward++) {
		incr = forward ? PAGE_SIZE : -PAGE_SIZE;
		curoff = center->offset + incr;
		for ( ; (forward == 0 && curoff >= lo) ||
		    (forward && curoff < hi);
		    curoff += incr) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL) {
				break;			/* no page */
			}
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0) {
					/* don't want mapped pages at all */
					break;
				}

				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					    == PG_CLEAN &&
					    pmap_is_modified(PMAP_PGARG(pclust)))
						pclust->flags &= ~PG_CLEAN;
					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}

			/* is page available for cleaning and does it need it? */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0) {
				break;	/* page is already clean or is busy */
			}

			/* yes!  enroll the page in our array */
			pclust->flags |= PG_BUSY;		/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");

			/* XXX: protect wired page?  see above comment. */
			pmap_page_protect(PMAP_PGARG(pclust), VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			(*npages)++;
		}
	}

	/*
	 * done!  return the cluster array to the caller.
	 */

	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
	return(ppsp);
}
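
/*
 * Example (sketch, not from the original source): the three clustering
 * choices described above correspond to how a pager fills in its
 * uvm_pagerops.  The initializer here is abridged to the one member of
 * interest ("my_pager" is a hypothetical name):
 *
 *	struct uvm_pagerops my_pager = {
 *		...
 *		uvm_mk_pcluster,	** [2] use the generic clusterer **
 *		...
 *	};
 *
 * Setting the member to NULL selects [1] (never cluster); pointing it
 * at a pager-specific function with the same signature selects [3].
 */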


/*
 * uvm_shareprot: generic share protect routine
 *
 * => caller must lock map entry's map
 * => caller must lock object pointed to by map entry
 */

void
uvm_shareprot(entry, prot)
	vm_map_entry_t entry;
	vm_prot_t prot;
{
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct vm_page *pp;
	vaddr_t start, stop;
	UVMHIST_FUNC("uvm_shareprot"); UVMHIST_CALLED(maphist);

	if (UVM_ET_ISSUBMAP(entry))
		panic("uvm_shareprot: non-object attached");

	start = entry->offset;
	stop = start + (entry->end - entry->start);

	/*
	 * traverse list of pages in object.  if page in range, pmap_prot it
	 */

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) {
		if (pp->offset >= start && pp->offset < stop)
			pmap_page_protect(PMAP_PGARG(pp), prot);
	}
	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *    backing it.  this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *    for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES:	all pages in uobj are valid targets
 *	PGO_DOACTCLUST:	include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO:	do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *    if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *    1. we return the VM_PAGER status code of the pageout
 *    2. we return with the page queues unlocked
 *    3. if (uobj != NULL) [!swap_backed] we return with
 *	uobj locked _only_ if PGO_PDFREECLUST is set
 *	AND result != VM_PAGER_PEND.  in all other cases
 *	we return with uobj unlocked.  [this is a hack
 *	that allows the pagedaemon to save one lock/unlock
 *	pair in the !swap_backed case since we have to
 *	lock the uobj to drop the cluster anyway]
 *    4. on errors we always drop the cluster.  thus, if we return
 *	!PEND, !OK, then the caller only has to worry about
 *	un-busying the main page (not the cluster pages).
 *    5. on success, if !PGO_PDFREECLUST, we return the cluster
 *	with all pages busy (caller must un-busy and check
 *	wanted/released flags).
 */

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr; /* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	vaddr_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;

	/*
	 * note that uobj is null if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;	/* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.  the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.  if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 *	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 * i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST, 0);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.  if we have clustered, we drop the
	 * cluster and try again.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj)
			simple_lock(&uobj->vmobjlock);
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP,
		    swblk);
		if (pg != NULL)
			goto ReTry;
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).  give up!  the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}
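
/*
 * Example (sketch, not from the original source): a swap-backed pageout
 * in the style of the pagedaemon.  "swcluster", "nswpages", and
 * "startblk" are hypothetical names: a cluster the caller has already
 * built and the swap block allocated for swcluster[0]:
 *
 *	uvm_lock_pageq();
 *	** ... build swcluster[0..nswpages-1], mark pages PG_BUSY ... **
 *	result = uvm_pager_put(NULL, NULL, &swcluster, &nswpages,
 *	    PGO_PDFREECLUST, startblk, 0);
 *	** page queues are unlocked on return.  for async I/O we get **
 *	** VM_PAGER_PEND and the aiodone path finishes the cleanup.  **
 */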

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.  we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *	PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */


void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags, swblk)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
	int swblk;			/* valid if (uobj == NULL &&
					   PGO_REALLOCSWAP) */
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * if we need to reallocate swap space for the cluster we are dropping
	 * (true if swap-backed and PGO_REALLOCSWAP) then free the old
	 * allocation now.  save a block for "pg" if it is non-NULL.
	 *
	 * note that we will zap the object's pointer to swap in the "for" loop
	 * below...
	 */

	if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
		if (pg)
			uvm_swap_free(swblk + 1, *npages - 1);
		else
			uvm_swap_free(swblk, *npages);
	}

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		/* skip "pg" or empty slot */
		if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED) {
			/* still holding obj lock */
			wakeup(ppsp[lcv]);
		}

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(PMAP_PGARG(ppsp[lcv]),
				    VM_PROT_NONE);	/* be safe */
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg "
				    "function");
#endif
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died "
				    "with active page");
#endif
			/* only unlock the object if it is still alive... */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);
			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?
			 * we are currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED|PG_FAKE);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout, update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(PMAP_PGARG(ppsp[lcv]));
			pmap_clear_modify(PMAP_PGARG(ppsp[lcv]));
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}

	}

	/*
	 * drop to a cluster of 1 page ("pg") if requested
	 */

	if (pg && (flags & PGO_PDFREECLUST) == 0) {
		/*
		 * if the pageout was not successful, we make a 1 page cluster.
		 */
		ppsp[0] = pg;
		*npages = 1;

		/*
		 * assign new swap block to new cluster, if anon backed
		 */
		if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = swblk;	/* reassign */
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT, swblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
}

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone1(bp)
	struct buf *bp;
{
	struct buf *mbp = bp->b_private;

#ifdef DIAGNOSTIC
	if (mbp == bp) {
		panic("uvm_aio_biodone1: mbp == bp %p", bp);
	}
#endif

	if (bp->b_flags & B_ERROR) {
		mbp->b_flags |= B_ERROR;
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	pool_put(&bufpool, bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}
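
/*
 * Example (sketch, not from the original source): how a caller might
 * split a large pager I/O into nested bufs that funnel into the handler
 * above.  "mbp" is the master buf; each child buf carries a slice of
 * the transfer, points back at the master via b_private, and runs
 * uvm_aio_biodone1() at interrupt time, so the master's biodone() fires
 * only after the last child completes:
 *
 *	mbp->b_resid = mbp->b_bcount;	** total bytes outstanding **
 *	mbp->b_iodone = uvm_aio_biodone;
 *	** for each slice of the transfer: **
 *		bp = pool_get(&bufpool, PR_WAITOK);
 *		** ... set up bp->b_data, bp->b_bcount, bp->b_blkno ... **
 *		bp->b_private = mbp;
 *		bp->b_iodone = uvm_aio_biodone1;
 *		VOP_STRATEGY(bp);
 */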

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(bp)
	struct buf *bp;
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	simple_unlock(&uvm.aiodoned_lock);
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(bp)
	struct buf *bp;
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];
	struct uvm_object *uobj;
	int s, i;
	boolean_t release, write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	release = (bp->b_flags & (B_ERROR|B_READ)) == (B_ERROR|B_READ);
	write = (bp->b_flags & B_READ) == 0;
	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];

		if (i == 0) {
			swap = (pg->pqflags & PQ_SWAPBACKED) != 0;
			if (!swap) {
				uobj = pg->uobject;
				simple_lock(&uobj->vmobjlock);
			}
		}
#ifdef DIAGNOSTIC
		if (!swap && pg->uobject != uobj) {
			panic("uvm_aio_aiodone: mismatched pg %d %p uobj %p",
			    i, pg, uobj);
		}
#endif

		if (swap) {
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
			}
		}

		/*
		 * if this is a read and we got an error, mark the pages
		 * PG_RELEASED so that uvm_page_unbusy() will free them.
		 */

		if (release) {
			if (pg->pqflags & PQ_ANON) {
				pg->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				simple_unlock(&pg->uanon->an_lock);
				uvm_anfree(pg->uanon);
			} else {
				uobj->pgops->pgo_releasepg(pg, NULL);
				if (swap) {
					simple_unlock(&pg->uobject->vmobjlock);
				}
			}
			continue;
		}

#ifdef DIAGNOSTIC
		if (write && (pgs[i]->flags & PG_FAKE)) {
			panic("uvm_aio_aiodone: wrote PG_FAKE page %p",
			    pgs[i]);
		}
#endif

		/*
		 * if this is a read and the page is PG_FAKE,
		 * or this was a write, mark the page PG_CLEAN and not PG_FAKE.
		 */

		if ((pgs[i]->flags & PG_FAKE) || write) {
			pmap_clear_reference(PMAP_PGARG(pgs[i]));
			pmap_clear_modify(PMAP_PGARG(pgs[i]));
			pgs[i]->flags |= PG_CLEAN;
			pgs[i]->flags &= ~PG_FAKE;
		}
		if (swap) {
			if (pg->pqflags & PQ_ANON) {
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
	uvm_page_unbusy(pgs, npages);
	if (!swap) {
		simple_unlock(&uobj->vmobjlock);
	}

	s = splbio();
	pool_put(&bufpool, bp);
	splx(s);
}

/*
 * translate unix errno values to VM_PAGER_*.
 */

int
uvm_errno2vmerror(errno)
	int errno;
{
	switch (errno) {
	case 0:
		return VM_PAGER_OK;
	case EINVAL:
		return VM_PAGER_BAD;
	case EINPROGRESS:
		return VM_PAGER_PEND;
	case EIO:
		return VM_PAGER_ERROR;
	case EAGAIN:
		return VM_PAGER_AGAIN;
	case EBUSY:
		return VM_PAGER_UNLOCK;
	default:
		return VM_PAGER_ERROR;
	}
}
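
/*
 * Example (sketch, not from the original source): a pager's pgo_put
 * would typically funnel the errno from its underlying I/O operation
 * through the translator above before returning; the underlying op
 * shown here is hypothetical:
 *
 *	error = ** ... start or perform the underlying I/O ... **
 *	return (uvm_errno2vmerror(error));
 */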