/*	$NetBSD: uvm_pager.c,v 1.16.4.5 1999/07/31 19:01:33 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_pmap_new.h"
#include "opt_uvmhist.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGER
#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops aobj_pager;
extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;
extern struct uvm_pagerops ubc_pager;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE       (4 * 1024 * 1024)
vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */


/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, 0, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 *
 * XXX It would be nice to know the direction of the I/O, so that we can
 * XXX map only what is necessary.
 */

vaddr_t
uvm_pagermapin(pps, npages, waitf)
	struct vm_page **pps;
	int npages;
	int waitf;
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, waitf=%d)",
	      pps, npages, waitf, 0);

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	      UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if (waitf == M_NOWAIT) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map",0);
		goto ReStart;
	}

	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
#ifdef DEBUG
		if ((pp->flags & PG_BUSY) == 0)
			panic("uvm_pagermapin: page not busy");
#endif

		/*
		 * XXX VM_PROT_DEFAULT includes VM_PROT_EXEC; is that
		 * XXX really necessary?  It could lead to unnecessary
		 * XXX instruction cache flushes.
		 */
		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    VM_PROT_DEFAULT, TRUE,
		    VM_PROT_READ | VM_PROT_WRITE);
	}

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}
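
/*
 * Example (illustrative sketch only, not part of the original code):
 * a pager that needs a temporary kernel mapping for a transfer of
 * "npages" busy pages would bracket the I/O like this, where "do_io"
 * stands in for a hypothetical transfer routine:
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pps, npages, M_WAITOK);
 *	error = do_io((caddr_t)kva, npages << PAGE_SHIFT);
 *	uvm_pagermapout(kva, npages);
 *
 * with M_WAITOK the mapin sleeps until pager_map space is available;
 * only an M_NOWAIT caller has to be prepared for a return value of 0.
 */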

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our pmap-level mappings by hand and then remove the map
 * entry (waking up anyone waiting for space).
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	vm_map_lock(pager_map);
	(void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	pmap_remove(pmap_kernel(), kva, kva + size);
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *      PGO_ALLPAGES:  all pages in object are valid targets
 *      !PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *      PGO_DOACTCLUST: include active pages in cluster.
 *        NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *              PG_CLEANCHK is only a hint, but clearing will help reduce
 *              the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;  /* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	vaddr_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	vaddr_t lo, hi, curoff;
	int center_idx, forward, incr;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
#ifdef DIAGNOSTIC
		printf("uvm_mk_pcluster uobj %p npages %d lo 0x%lx hi 0x%lx flags 0x%x\n",
		       uobj, *npages, lo, hi, flags);
#endif
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward = 0 ; forward <= 1 ; forward++) {
		incr = forward ? PAGE_SIZE : -PAGE_SIZE;
		curoff = center->offset + incr;
		for ( ;(forward == 0 && curoff >= lo) ||
		       (forward && curoff < hi);
		      curoff += incr) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL) {
				break;			/* no page */
			}
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0) {
					/* don't want mapped pages at all */
					break;
				}

				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					    == PG_CLEAN &&
					    pmap_is_modified(PMAP_PGARG(pclust)))
						pclust->flags &= ~PG_CLEAN;
					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}

			/* is the page available for cleaning and does it need it? */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0) {
				break;	/* page is already clean or is busy */
			}

			/* yes!   enroll the page in our array */
			pclust->flags |= PG_BUSY;		/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");

			/* XXX: protect wired page?   see above comment. */
			pmap_page_protect(PMAP_PGARG(pclust), VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			(*npages)++;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}
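
/*
 * Example (illustrative sketch, not part of the original code): a pager
 * opts into generic clustering by pointing pgo_mk_pcluster at the
 * function above in its uvm_pagerops, e.g. in a hypothetical pager:
 *
 *	struct uvm_pagerops example_pager = {
 *		...
 *		uvm_mk_pcluster,	<- the pgo_mk_pcluster slot
 *		...
 *	};
 *
 * a caller hands in "pps" with *npages free slots; it gets back a
 * pointer into that array whose *npages entries are busy pages
 * clustered around "center" (possibly just the center page itself).
 */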


/*
 * uvm_shareprot: generic share protect routine
 *
 * => caller must lock map entry's map
 * => caller must lock object pointed to by map entry
 */

void
uvm_shareprot(entry, prot)
	vm_map_entry_t entry;
	vm_prot_t prot;
{
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct vm_page *pp;
	vaddr_t start, stop;
	UVMHIST_FUNC("uvm_shareprot"); UVMHIST_CALLED(maphist);

	if (UVM_ET_ISSUBMAP(entry))
		panic("uvm_shareprot: non-object attached");

	start = entry->offset;
	stop = start + (entry->end - entry->start);

	/*
	 * traverse list of pages in object.   if a page is in range,
	 * pmap_page_protect() it.
	 */

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) {
		if (pp->offset >= start && pp->offset < stop)
			pmap_page_protect(PMAP_PGARG(pp), prot);
	}
	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
}

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.   this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN.
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result != VM_PAGER_PEND.   in all other cases
 *		we return with uobj unlocked.   [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.   thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr; /* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	vaddr_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;

	/*
	 * note that uobj is null if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;  /* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.   the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.   if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		simple_lock_assert(&uobj->vmobjlock, SLOCK_LOCKED);
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
		/* object is now unlocked */
		simple_lock_assert(&uobj->vmobjlock, SLOCK_UNLOCKED);
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 * 	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 *  i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST, 0);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.    if we have clustered, we drop the
	 * cluster and try again.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj)
			simple_lock(&uobj->vmobjlock);
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP,
		    swblk);
		if (pg != NULL)
			goto ReTry;
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).    give up!   the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}
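
/*
 * Example (illustrative sketch, not part of the original code): a
 * hypothetical object-backed caller, following the return-state rules
 * above.  the caller locks the object and page queues, busies and
 * write-protects "pg", and supplies a cluster array:
 *
 *	struct vm_page *pps[16], **ppsp = pps;
 *	int npages = 16, result;
 *
 *	result = uvm_pager_put(uobj, pg, &ppsp, &npages,
 *	    PGO_ALLPAGES|PGO_SYNCIO, 0, 0);
 *
 * on return the page queues are unlocked.  on VM_PAGER_OK (without
 * PGO_PDFREECLUST) the caller must un-busy all npages pages in ppsp;
 * on any error only "pg" remains for the caller to un-busy.
 */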

/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.   we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *           PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */


void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags, swblk)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
	int swblk;			/* valid if (uobj == NULL &&
					   PGO_REALLOCSWAP) */
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * if we need to reallocate swap space for the cluster we are dropping
	 * (true if swap-backed and PGO_REALLOCSWAP) then free the old
	 * allocation now.   save a block for "pg" if it is non-NULL.
	 *
	 * note that we will zap the object's pointer to swap in the "for" loop
	 * below...
	 */

	if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
		if (pg)
			uvm_swap_free(swblk + 1, *npages - 1);
		else
			uvm_swap_free(swblk, *npages);
	}

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		/* skip "pg" or empty slot */
		if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					/* zap swap block */
					ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED) {
			/* still holding obj lock */
			wakeup(ppsp[lcv]);
		}

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(PMAP_PGARG(ppsp[lcv]),
				    VM_PROT_NONE); /* be safe */
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg "
				    "function");
#endif
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died "
				    "with active page");
#endif
			/* only unlock the object if it is still alive...  */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?  we
			 * are currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED|PG_FAKE);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			pmap_clear_reference(PMAP_PGARG(ppsp[lcv]));
			pmap_clear_modify(PMAP_PGARG(ppsp[lcv]));
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}

	}

	/*
	 * drop to a cluster of 1 page ("pg") if requested
	 */

	if (pg && (flags & PGO_PDFREECLUST) == 0) {
		/*
		 * if we are not a successful pageout, we make a 1 page cluster.
		 */
		ppsp[0] = pg;
		*npages = 1;

		/*
		 * assign new swap block to new cluster, if anon backed
		 */
		if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = swblk;	/* reassign */
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT, swblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
}
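
/*
 * Example (illustrative, not part of the original code): the swblk
 * bookkeeping above for a swap-backed cluster of 4 pages with "pg"
 * present.  slots swblk+1 .. swblk+3 (the dropped pages) are freed
 * and the dropped pages have their swap pointers zapped, while "pg"
 * alone keeps slot swblk:
 *
 *	uvm_pager_dropcluster(NULL, pg, ppsp, &npages,
 *	    PGO_REALLOCSWAP, swblk);
 *
 * afterwards npages == 1, ppsp[0] == pg, and pg's anon (or aobj)
 * swap slot has been reassigned to swblk.
 */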

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone1(bp)
	struct buf *bp;
{
	struct buf *mbp = bp->b_private;

#ifdef DIAGNOSTIC
	if (mbp == bp) {
		panic("uvm_aio_biodone1: mbp == bp %p", bp);
	}
#endif

	if (bp->b_flags & B_ERROR) {
		mbp->b_flags |= B_ERROR;
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	pool_put(&bufpool, bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}
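
/*
 * Example (illustrative sketch, not part of the original code): the
 * nested-buf convention this handler expects.  a caller splitting one
 * logical i/o across several child bufs would set the master buf's
 * b_resid to the total byte count and point each child at the master:
 *
 *	mbp->b_resid = total_bytes;
 *	...for each child bp...
 *		bp->b_private = mbp;
 *		bp->b_iodone = uvm_aio_biodone1;
 *
 * as each child completes, its b_bcount is subtracted from the
 * master's b_resid; the master's biodone() fires when it hits zero.
 */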

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(bp)
	struct buf *bp;
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	simple_unlock(&uvm.aiodoned_lock);
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(bp)
	struct buf *bp;
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];
	struct uvm_object *uobj;
	int s, i;
	boolean_t release, write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	release = (bp->b_flags & (B_ERROR|B_READ)) == (B_ERROR|B_READ);
	write = (bp->b_flags & B_READ) == 0;
	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];

		if (i == 0) {
			swap = (pg->pqflags & PQ_SWAPBACKED) != 0;
			if (!swap) {
				uobj = pg->uobject;
				simple_lock(&uobj->vmobjlock);
			}
		}
#ifdef DIAGNOSTIC
		if (!swap && pg->uobject != uobj) {
			panic("uvm_aio_aiodone: mismatched pg %d %p uobj %p",
			      i, pg, uobj);
		}
#endif

		if (swap) {
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
			}
		}

		/*
		 * if this is a read and we got an error, mark the pages
		 * PG_RELEASED so that uvm_page_unbusy() will free them.
		 */

		if (release) {
			if (pg->pqflags & PQ_ANON) {
				pg->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				simple_unlock(&pg->uanon->an_lock);
				uvm_anfree(pg->uanon);
			} else {
				uobj->pgops->pgo_releasepg(pg, NULL);
				if (swap) {
					simple_unlock(&pg->uobject->vmobjlock);
				}
			}
			continue;
		}

#ifdef DIAGNOSTIC
		if (write && (pgs[i]->flags & PG_FAKE) != 0) {
			panic("uvm_aio_aiodone: wrote PG_FAKE page %p", pgs[i]);
		}
#endif

		/*
		 * if this is a read and the page is PG_FAKE,
		 * or this was a write, mark the page PG_CLEAN and not PG_FAKE.
		 */

		if ((pgs[i]->flags & PG_FAKE) != 0 || write) {
			pmap_clear_reference(PMAP_PGARG(pgs[i]));
			pmap_clear_modify(PMAP_PGARG(pgs[i]));
			pgs[i]->flags |= PG_CLEAN;
			pgs[i]->flags &= ~PG_FAKE;
		}
		if (swap) {
			if (pg->pqflags & PQ_ANON) {
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
	uvm_page_unbusy(pgs, npages);
	if (!swap) {
		simple_unlock(&uobj->vmobjlock);
	}

	s = splbio();
	pool_put(&bufpool, bp);
	splx(s);
}

/*
 * translate unix errno values to VM_PAGER_*.
 */

int
uvm_errno2vmerror(errno)
	int errno;
{
	switch (errno) {
	case 0:
		return VM_PAGER_OK;
	case EINVAL:
		return VM_PAGER_BAD;
	case EINPROGRESS:
		return VM_PAGER_PEND;
	case EIO:
		return VM_PAGER_ERROR;
	case EAGAIN:
		return VM_PAGER_AGAIN;
	case EBUSY:
		return VM_PAGER_UNLOCK;
	default:
		return VM_PAGER_ERROR;
	}
}
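
/*
 * Example (illustrative, not part of the original code): a pager whose
 * i/o path reports a unix errno can convert it on the way out, e.g.:
 *
 *	error = some_errno_returning_io_routine(...);
 *	return (uvm_errno2vmerror(error));
 *
 * unknown errno values fall through to VM_PAGER_ERROR.
 */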