/*	$NetBSD: uvm_pager.c,v 1.36 2000/11/27 18:26:41 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_uvmhist.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#define UVM_PAGER
#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;
extern struct uvm_pagerops ubc_pager;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static boolean_t emerginuse;
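
/*
 * emergva (above) is a MAXBSIZE reserve of KVA for the pagedaemon.
 * if pager_map fills up, the pagedaemon must still be able to map a
 * cluster for pageout, since freeing pager_map space can itself
 * require pageouts to complete.  emerginuse serializes use of the
 * reserved area.
 */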

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, 0, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;
	emergva = uvm_km_valloc(kernel_map, MAXBSIZE);
	emerginuse = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(pps, npages, flags)
	struct vm_page **pps;
	int npages;
	int flags;
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	      UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if (curproc == uvm.pagedaemon_proc) {
			simple_lock(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, FALSE,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = TRUE;
			simple_unlock(&pager_map_wanted_lock);
			kva = emergva;
			KASSERT(npages <= MAXBSIZE >> PAGE_SHIFT);
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
#ifdef DEBUG
		if ((pp->flags & PG_BUSY) == 0)
			panic("uvm_pagermapin: pg %p not busy", pp);
#endif
		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | prot);
	}

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}
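
/*
 * a typical pager pairs uvm_pagermapin with uvm_pagermapout around the
 * actual I/O.  a sketch only (not lifted from any real pager):
 *
 *	kva = uvm_pagermapin(pps, npages, UVMPAGER_MAPIN_READ);
 *	if (kva == 0)
 *		return(VM_PAGER_AGAIN);
 *	... do the I/O on the npages now mapped at kva ...
 *	uvm_pagermapout(kva, npages);
 *
 * mapin can only fail if UVMPAGER_MAPIN_WAITOK was not supplied.  a
 * pagein passes UVMPAGER_MAPIN_READ so the pages are mapped read/write;
 * a pageout passes 0 and gets a read-only mapping.
 */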

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove the map entry ourselves (waking up anyone waiting for
 * pager_map space) and then tear down the pmap mappings by hand.
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	if (kva == emergva) {
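		/*
		 * the emergency area is permanently-allocated kernel_map
		 * KVA, not a pager_map entry, so there is no map entry to
		 * remove; only the pmap mappings (at "remove:" below) need
		 * to be torn down.
		 */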
		simple_lock(&pager_map_wanted_lock);
		emerginuse = FALSE;
		wakeup(&emergva);
		simple_unlock(&pager_map_wanted_lock);
		entries = NULL;
		goto remove;
	}

	vm_map_lock(pager_map);
	(void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
remove:
	pmap_remove(pmap_kernel(), kva, kva + (npages << PAGE_SHIFT));
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager-specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *      PGO_ALLPAGES:  all pages in object are valid targets
 *      !PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *      PGO_DOACTCLUST: include active pages in cluster.
 *        NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *              PG_CLEANCHK is only a hint, but clearing will help reduce
 *              the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;  /* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	voff_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	voff_t lo, hi, curoff;
	int center_idx, forward, incr;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
#ifdef DIAGNOSTIC
		printf("uvm_mk_pcluster uobj %p npages %d lo 0x%llx hi 0x%llx "
		       "flags 0x%x\n", uobj, *npages, (long long)lo,
		       (long long)hi, flags);
#endif
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;
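
	/*
	 * ppsp points at the center slot of pps.  the backward pass
	 * below fills slots by pre-decrementing ppsp, and the forward
	 * pass appends at ppsp[*npages], so the cluster stays contiguous,
	 * sorted by offset, with ppsp always at its first page.
	 */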

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward  = 0 ; forward <= 1 ; forward++) {
		incr = forward ? PAGE_SIZE : -PAGE_SIZE;
		curoff = center->offset + incr;
		for ( ;(forward == 0 && curoff >= lo) ||
		       (forward && curoff < hi);
		      curoff += incr) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL) {
				break;			/* no page */
			}
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0) {
					/* don't want mapped pages at all */
					break;
				}

				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					   == PG_CLEAN &&
					   pmap_is_modified(pclust))
						pclust->flags &= ~PG_CLEAN;

					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}

			/* is the page available for cleaning, and does it need it? */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0) {
				break;	/* page is already clean or is busy */
			}

			/* yes!   enroll the page in our array */
			pclust->flags |= PG_BUSY;		/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");

			/* XXX: protect wired page?   see above comment. */
			pmap_page_protect(pclust, VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			(*npages)++;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}
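
/*
 * illustration: with center_idx == 2, two pages clustered backward
 * and one forward, we return ppsp == &pps[0] with *npages == 4:
 *
 *	pps[]:  [ off-2pg ][ off-1pg ][ center ][ off+1pg ] ...
 *	          ^ppsp
 */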

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.   this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN.
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result != VM_PAGER_PEND.   in all other cases
 *		we return with uobj unlocked.   [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.   thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	voff_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;
	UVMHIST_FUNC("uvm_pager_put"); UVMHIST_CALLED(ubchist);

	/*
	 * note that uobj is null if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;  /* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.   the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.   if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
		UVMHIST_LOG(ubchist, "put -> %d", result, 0,0,0);
		/* object is now unlocked */
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 * 	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 *  i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.
	 * for transient errors, drop to a cluster of 1 page ("pg")
	 * and try again.  for hard errors, don't bother retrying.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj) {
			simple_lock(&uobj->vmobjlock);
		}
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);

		/*
		 * for failed swap-backed pageouts with a "pg",
		 * we need to reset pg's swslot to either:
		 * "swblk" (for transient errors, so we can retry),
		 * or 0 (for hard errors).
		 */

		if (uobj == NULL && pg != NULL) {
			int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0;
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = nswblk;
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
					       pg->offset >> PAGE_SHIFT,
					       nswblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
		if (result == VM_PAGER_AGAIN) {

			/*
			 * for transient failures, free all the swslots that
			 * we're not going to retry with.
			 */

			if (uobj == NULL) {
				if (pg) {
					uvm_swap_free(swblk + 1, *npages - 1);
				} else {
					uvm_swap_free(swblk, *npages);
				}
			}
			if (pg) {
				ppsp[0] = pg;
				*npages = 1;
				goto ReTry;
			}
		} else if (uobj == NULL) {

			/*
			 * for hard errors on swap-backed pageouts,
			 * mark the swslots as bad.  note that we do not
			 * free swslots that we mark bad.
			 */

			uvm_swap_markbad(swblk, *npages);
		}
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).  give up!  the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}
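
/*
 * an illustrative caller (a sketch only, not lifted from a real pager):
 * pageout of busy page "pg" from a locked, non-swap-backed object,
 * letting the pager cluster into a local array:
 *
 *	struct vm_page *pps[SOMESIZE], **ppsp = pps;
 *	int npages = SOMESIZE;
 *
 *	uvm_lock_pageq();
 *	result = uvm_pager_put(uobj, pg, &ppsp, &npages,
 *	    PGO_DOACTCLUST, start, stop);
 *
 * on return the page queues are unlocked; per item 4 above, unless
 * result is VM_PAGER_OK or VM_PAGER_PEND only "pg" remains for the
 * caller to un-busy.  SOMESIZE is a placeholder for whatever cluster
 * size the caller can afford.
 */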

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.   we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *           PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		[only meaningful if swap-backed (uobj == NULL)]
 */

void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		/* skip "pg" or empty slot */
		if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					  /* zap swap block */
					  ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED) {
			/* still holding obj lock */
			wakeup(ppsp[lcv]);
		}

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(ppsp[lcv], VM_PROT_NONE);
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg "
				    "function");
#endif
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died "
				    "with active page");
#endif
			/* only unlock the object if it is still alive...  */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);
			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?
			 * we are currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED|PG_FAKE);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout, update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(ppsp[lcv]);
			pmap_clear_modify(ppsp[lcv]);
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}
	}
}

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone1(bp)
	struct buf *bp;
{
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);
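
	/*
	 * the parent buf's b_resid tracks the bytes still outstanding
	 * across all child bufs: propagate any error, subtract our share,
	 * and complete the parent when the last child finishes.
	 */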
	if (bp->b_flags & B_ERROR) {
		mbp->b_flags |= B_ERROR;
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	pool_put(&bufpool, bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(bp)
	struct buf *bp;
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

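	/*
	 * queue the buf for the aiodone daemon, which will dequeue it
	 * and call b_iodone (now uvm_aio_aiodone) in thread context.
	 */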
	simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	simple_unlock(&uvm.aiodoned_lock);
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(bp)
	struct buf *bp;
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];
	struct uvm_object *uobj;
	int s, i;
	boolean_t release, write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

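	/*
	 * a failed read leaves the pages without valid contents, so they
	 * must be released (freed) rather than unbusied below.  a write
	 * never sets "release": even a failed pageout leaves the page
	 * contents valid.
	 */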
	release = (bp->b_flags & (B_ERROR|B_READ)) == (B_ERROR|B_READ);
	write = (bp->b_flags & B_READ) == 0;
	/* XXXUBC B_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
		(*bioops.io_pageiodone)(bp);
	}

	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];

		if (i == 0) {
			swap = (pg->pqflags & PQ_SWAPBACKED) != 0;
			if (!swap) {
				uobj = pg->uobject;
				simple_lock(&uobj->vmobjlock);
			}
		}
		KASSERT(swap || pg->uobject == uobj);
		if (swap) {
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
			}
		}

		/*
		 * if this is a read and we got an error, mark the pages
		 * PG_RELEASED so that uvm_page_unbusy() will free them.
		 */

		if (release) {
			pg->flags |= PG_RELEASED;
			continue;
		}
		KASSERT(!write || (pgs[i]->flags & PG_FAKE) == 0);

		/*
		 * if this is a read and the page is PG_FAKE,
		 * or this was a write, mark the page PG_CLEAN and not PG_FAKE.
		 */

		if (pgs[i]->flags & PG_FAKE || write) {
			pmap_clear_reference(pgs[i]);
			pmap_clear_modify(pgs[i]);
			pgs[i]->flags |= PG_CLEAN;
			pgs[i]->flags &= ~PG_FAKE;
		}
		if (swap) {
			if (pg->pqflags & PQ_ANON) {
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
	uvm_page_unbusy(pgs, npages);
	if (!swap) {
		simple_unlock(&uobj->vmobjlock);
	}

	s = splbio();
	if (write && (bp->b_flags & B_AGE) != 0) {
		vwakeup(bp);
	}
	pool_put(&bufpool, bp);
	splx(s);
}

/*
 * translate unix errno values to VM_PAGER_*.
 */

int
uvm_errno2vmerror(errno)
	int errno;
{
	switch (errno) {
	case 0:
		return VM_PAGER_OK;
	case EINVAL:
		return VM_PAGER_BAD;
	case EINPROGRESS:
		return VM_PAGER_PEND;
	case EIO:
		return VM_PAGER_ERROR;
	case EAGAIN:
		return VM_PAGER_AGAIN;
	case EBUSY:
		return VM_PAGER_UNLOCK;
	default:
		return VM_PAGER_ERROR;
	}
}
    908