/*	$NetBSD: uvm_pager.c,v 1.16.4.1 1999/06/07 04:25:37 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

#include "opt_pmap_new.h"

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGER
#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

extern struct uvm_pagerops aobj_pager;
extern struct uvm_pagerops uvm_deviceops;
extern struct uvm_pagerops uvm_vnodeops;
extern struct uvm_pagerops ubc_pager;

struct uvm_pagerops *uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

#define PAGER_MAP_SIZE       (4 * 1024 * 1024)
vm_map_t pager_map;		/* XXX */
simple_lock_data_t pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */

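/*
 * sizing note: with the common 4k page size, PAGER_MAP_SIZE above
 * provides KVA for at most 4 * 1024 * 1024 / 4096 == 1024 concurrently
 * mapped pager pages (PAGE_SIZE is machine-dependent, so the exact
 * count varies from port to port).
 */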

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
				    PAGER_MAP_SIZE, FALSE, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);
	uvm_aiobuf_pool = pool_create(sizeof(struct uvm_aiobuf),
				      0, 0, 0, "aiobuf", 0, NULL, NULL, 0);


	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

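/*
 * note: to hook an additional pager into the system, declare its
 * "struct uvm_pagerops" above and add a pointer to it to the
 * uvmpagerops[] array; uvm_pager_init() will then call its pgo_init
 * function (if it has one) at boot time along with the rest.
 */
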
/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_enter() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(pps, npages, aiop, waitf)
	struct vm_page **pps;
	int npages;
	struct uvm_aiodesc **aiop;	/* OUT */
	int waitf;
{
	vsize_t size;
	vaddr_t kva;
	struct uvm_aiodesc *aio;
#if !defined(PMAP_NEW)
	vaddr_t cva;
	struct vm_page *pp;
#endif
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d, aiop=0x%x, waitf=%d)",
	      pps, npages, aiop, waitf);

ReStart:
	if (aiop) {
		MALLOC(aio, struct uvm_aiodesc *, sizeof(*aio), M_TEMP, waitf);
		if (aio == NULL)
			return(0);
		*aiop = aio;
	} else {
		aio = NULL;
	}

	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	      UVM_UNKNOWN_OFFSET, UVM_FLAG_NOMERGE) != KERN_SUCCESS) {
		if (waitf == M_NOWAIT) {
			if (aio)
				FREE(aio, M_TEMP);
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, "  SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map",0);
		goto ReStart;
	}

#if defined(PMAP_NEW)
	/*
	 * XXX: (ab)using the pmap module to store state info for us.
	 * (pmap stores the PAs... we fetch them back later and convert back
	 * to pages with PHYS_TO_VM_PAGE).
	 */
	pmap_kenter_pgs(kva, pps, npages);

#else /* PMAP_NEW */

	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
#ifdef DEBUG
		if ((pp->flags & PG_BUSY) == 0)
			panic("uvm_pagermapin: page not busy");
#endif

		pmap_enter(vm_map_pmap(pager_map), cva, VM_PAGE_TO_PHYS(pp),
		    VM_PROT_DEFAULT, TRUE, 0);
	}

#endif /* PMAP_NEW */

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we duplicate uvm_unmap() by hand so that we can add the
 * pager_map_wanted handling (waking up anyone waiting for space).
 */

void
uvm_pagermapout(kva, npages)
	vaddr_t kva;
	int npages;
{
	vsize_t size = npages << PAGE_SHIFT;
	vm_map_entry_t entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	vm_map_lock(pager_map);
	(void) uvm_unmap_remove(pager_map, kva, kva + size, &entries);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);

	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

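/*
 * usage sketch for the two functions above (illustrative only, not
 * compiled; "pps" and "npages" stand in for a caller's cluster): a
 * pager that needs kernel mappings for an I/O brackets the transfer
 * with uvm_pagermapin() and uvm_pagermapout().  with M_WAITOK the
 * mapin sleeps until pager_map space is available; with M_NOWAIT it
 * can fail and return zero.
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pps, npages, NULL, M_WAITOK);
 *	... start the I/O on the range [kva, kva + (npages << PAGE_SHIFT))
 *	    and wait for it to complete ...
 *	uvm_pagermapout(kva, npages);
 */
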
/*
 * uvm_mk_pcluster
 *
 * generic "make 'pager put' cluster" function.  a pager can either
 * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
 * generic function, or [3] set it to a pager specific function.
 *
 * => caller must lock object _and_ pagequeues (since we need to look
 *    at active vs. inactive bits, etc.)
 * => caller must make center page busy and write-protect it
 * => we mark all cluster pages busy for the caller
 * => the caller must unbusy all pages (and check wanted/released
 *    status if it drops the object lock)
 * => flags:
 *      PGO_ALLPAGES:  all pages in object are valid targets
 *      !PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
 *      PGO_DOACTCLUST: include active pages in cluster.
 *        NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
 *              PG_CLEANCHK is only a hint, but clearing will help reduce
 *              the number of calls we make to the pmap layer.
 */

struct vm_page **
uvm_mk_pcluster(uobj, pps, npages, center, flags, mlo, mhi)
	struct uvm_object *uobj;	/* IN */
	struct vm_page **pps, *center;  /* IN/OUT, IN */
	int *npages, flags;		/* IN/OUT, IN */
	vaddr_t mlo, mhi;		/* IN (if !PGO_ALLPAGES) */
{
	struct vm_page **ppsp, *pclust;
	vaddr_t lo, hi, curoff;
	int center_idx, forward, incr;
	UVMHIST_FUNC("uvm_mk_pcluster"); UVMHIST_CALLED(maphist);

	/*
	 * center page should already be busy and write protected.  XXX:
	 * suppose page is wired?  if we lock, then a process could
	 * fault/block on it.  if we don't lock, a process could write the
	 * pages in the middle of an I/O.  (consider an msync()).  let's
	 * lock it for now (better to delay than corrupt data?).
	 */

	/*
	 * get cluster boundaries, check sanity, and apply our limits as well.
	 */

	uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
	if ((flags & PGO_ALLPAGES) == 0) {
		if (lo < mlo)
			lo = mlo;
		if (hi > mhi)
			hi = mhi;
	}
	if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
#ifdef DIAGNOSTIC
		printf("uvm_mk_pcluster: provided page array too small (fixed)\n");
#endif
		pps[0] = center;
		*npages = 1;
		return(pps);
	}

	/*
	 * now determine the center and attempt to cluster around the
	 * edges
	 */

	center_idx = (center->offset - lo) >> PAGE_SHIFT;
	pps[center_idx] = center;	/* plug in the center page */
	ppsp = &pps[center_idx];
	*npages = 1;

	/*
	 * attempt to cluster around the left [backward], and then
	 * the right side [forward].
	 *
	 * note that for inactive pages (pages that have been deactivated)
	 * there are no valid mappings and PG_CLEAN should be up to date.
	 * [i.e. there is no need to query the pmap with pmap_is_modified
	 * since there are no mappings].
	 */

	for (forward  = 0 ; forward <= 1 ; forward++) {
		incr = forward ? PAGE_SIZE : -PAGE_SIZE;
		curoff = center->offset + incr;
		for ( ;(forward == 0 && curoff >= lo) ||
		       (forward && curoff < hi);
		      curoff += incr) {

			pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
			if (pclust == NULL) {
				break;			/* no page */
			}
			/* handle active pages */
			/* NOTE: inactive pages don't have pmap mappings */
			if ((pclust->pqflags & PQ_INACTIVE) == 0) {
				if ((flags & PGO_DOACTCLUST) == 0) {
					/* don't want mapped pages at all */
					break;
				}

				/* make sure "clean" bit is sync'd */
				if ((pclust->flags & PG_CLEANCHK) == 0) {
					if ((pclust->flags & (PG_CLEAN|PG_BUSY))
					    == PG_CLEAN &&
					    pmap_is_modified(PMAP_PGARG(pclust)))
						pclust->flags &= ~PG_CLEAN;
					/* now checked */
					pclust->flags |= PG_CLEANCHK;
				}
			}

			/* is page available for cleaning and does it need it? */
			if ((pclust->flags & (PG_CLEAN|PG_BUSY)) != 0) {
				break;	/* page is already clean or is busy */
			}

			/* XXX for now, disable putpage clustering */
			break;

			/* yes!   enroll the page in our array */
			pclust->flags |= PG_BUSY;		/* busy! */
			UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");

			/* XXX: protect wired page?   see above comment. */
			pmap_page_protect(PMAP_PGARG(pclust), VM_PROT_READ);
			if (!forward) {
				ppsp--;			/* back up one page */
				*ppsp = pclust;
			} else {
				/* move forward one page */
				ppsp[*npages] = pclust;
			}
			(*npages)++;
		}
	}

	/*
	 * done!  return the cluster array to the caller!!!
	 */

	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
	return(ppsp);
}

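/*
 * caller sketch for uvm_mk_pcluster() (illustrative only, not compiled;
 * "uobj", "pps", "npages" and "pg" are stand-ins): a pager's pgo_put
 * would typically build a put cluster around a center page like this,
 * honoring the locking contract spelled out above (object and page
 * queues locked, center page busy and write-protected on entry).
 *
 *	struct vm_page **ppsp;
 *
 *	ppsp = uvm_mk_pcluster(uobj, pps, &npages, pg, PGO_ALLPAGES, 0, 0);
 *	... write out ppsp[0 .. npages - 1], then un-busy every page,
 *	    checking the wanted/released flags if the object lock was
 *	    dropped in the meantime ...
 */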

/*
 * uvm_shareprot: generic share protect routine
 *
 * => caller must lock map entry's map
 * => caller must lock object pointed to by map entry
 */

void
uvm_shareprot(entry, prot)
	vm_map_entry_t entry;
	vm_prot_t prot;
{
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct vm_page *pp;
	vaddr_t start, stop;
	UVMHIST_FUNC("uvm_shareprot"); UVMHIST_CALLED(maphist);

	if (UVM_ET_ISSUBMAP(entry))
		panic("uvm_shareprot: non-object attached");

	start = entry->offset;
	stop = start + (entry->end - entry->start);

	/*
	 * traverse list of pages in object.   if page in range, pmap_prot it
	 */

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) {
		if (pp->offset >= start && pp->offset < stop)
			pmap_page_protect(PMAP_PGARG(pp), prot);
	}
	UVMHIST_LOG(maphist, "<- done",0,0,0,0);
}

/*
 * uvm_pager_put: high level pageout routine
 *
 * we want to pageout page "pg" to backing store, clustering if
 * possible.
 *
 * => page queues must be locked by caller
 * => if page is not swap-backed, then "uobj" points to the object
 *	backing it.   this object should be locked by the caller.
 * => if page is swap-backed, then "uobj" should be NULL.
 * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN.
 *    for swap-backed memory, "pg" can be NULL if there is no page
 *    of interest [sometimes the case for the pagedaemon]
 * => "ppsp_ptr" should point to an array of npages vm_page pointers
 *	for possible cluster building
 * => flags (first two for non-swap-backed pages)
 *	PGO_ALLPAGES: all pages in uobj are valid targets
 *	PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
 *	PGO_SYNCIO: do SYNC I/O (no async)
 *	PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
 * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
 *		  if (!uobj) start is the (daddr_t) of the starting swapblk
 * => return state:
 *	1. we return the VM_PAGER status code of the pageout
 *	2. we return with the page queues unlocked
 *	3. if (uobj != NULL) [!swap_backed] we return with
 *		uobj locked _only_ if PGO_PDFREECLUST is set
 *		AND result != VM_PAGER_PEND.   in all other cases
 *		we return with uobj unlocked.   [this is a hack
 *		that allows the pagedaemon to save one lock/unlock
 *		pair in the !swap_backed case since we have to
 *		lock the uobj to drop the cluster anyway]
 *	4. on errors we always drop the cluster.   thus, if we return
 *		!PEND, !OK, then the caller only has to worry about
 *		un-busying the main page (not the cluster pages).
 *	5. on success, if !PGO_PDFREECLUST, we return the cluster
 *		with all pages busy (caller must un-busy and check
 *		wanted/released flags).
 */

int
uvm_pager_put(uobj, pg, ppsp_ptr, npages, flags, start, stop)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, ***ppsp_ptr;/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;			/* IN */
	vaddr_t start, stop;		/* IN, IN */
{
	int result;
	daddr_t swblk;
	struct vm_page **ppsp = *ppsp_ptr;

	/*
	 * note that uobj is null  if we are doing a swap-backed pageout.
	 * note that uobj is !null if we are doing normal object pageout.
	 * note that the page queues must be locked to cluster.
	 */

	if (uobj) {	/* if !swap-backed */

		/*
		 * attempt to build a cluster for pageout using its
		 * make-put-cluster function (if it has one).
		 */

		if (uobj->pgops->pgo_mk_pcluster) {
			ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
			    npages, pg, flags, start, stop);
			*ppsp_ptr = ppsp;  /* update caller's pointer */
		} else {
			ppsp[0] = pg;
			*npages = 1;
		}

		swblk = 0;		/* XXX: keep gcc happy */

	} else {

		/*
		 * for swap-backed pageout, the caller (the pagedaemon) has
		 * already built the cluster for us.   the starting swap
		 * block we are writing to has been passed in as "start."
		 * "pg" could be NULL if there is no page we are especially
		 * interested in (in which case the whole cluster gets dropped
		 * in the event of an error or a sync "done").
		 */
		swblk = (daddr_t) start;
		/* ppsp and npages should be ok */
	}

	/* now that we've clustered we can unlock the page queues */
	uvm_unlock_pageq();

	/*
	 * now attempt the I/O.   if we have a failure and we are
	 * clustered, we will drop the cluster and try again.
	 */

ReTry:
	if (uobj) {
		/* object is locked */
		simple_lock_assert(&uobj->vmobjlock, SLOCK_LOCKED);
		result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
		/* object is now unlocked */
		simple_lock_assert(&uobj->vmobjlock, SLOCK_UNLOCKED);
	} else {
		/* nothing locked */
		result = uvm_swap_put(swblk, ppsp, *npages, flags);
		/* nothing locked */
	}

	/*
	 * we have attempted the I/O.
	 *
	 * if the I/O was a success then:
	 * 	if !PGO_PDFREECLUST, we return the cluster to the
	 *		caller (who must un-busy all pages)
	 *	else we un-busy cluster pages for the pagedaemon
	 *
	 * if I/O is pending (async i/o) then we return the pending code.
	 * [in this case the async i/o done function must clean up when
	 *  i/o is done...]
	 */

	if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
		if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
			/*
			 * drop cluster and relock object (only if I/O is
			 * not pending)
			 */
			if (uobj)
				/* required for dropcluster */
				simple_lock(&uobj->vmobjlock);
			if (*npages > 1 || pg == NULL)
				uvm_pager_dropcluster(uobj, pg, ppsp, npages,
				    PGO_PDFREECLUST, 0);
			/* if (uobj): object still locked, as per
			 * return-state item #3 */
		}
		return (result);
	}

	/*
	 * a pager error occurred.    if we have clustered, we drop the
	 * cluster and try again.
	 */

	if (*npages > 1 || pg == NULL) {
		if (uobj)
			simple_lock(&uobj->vmobjlock);
		uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP,
		    swblk);
		if (pg != NULL)
			goto ReTry;
	}

	/*
	 * a pager error occurred (even after dropping the cluster, if there
	 * was one).    give up!   the caller only has one page ("pg")
	 * to worry about.
	 */

	if (uobj && (flags & PGO_PDFREECLUST) != 0)
		simple_lock(&uobj->vmobjlock);
	return(result);
}

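/*
 * caller sketch for uvm_pager_put() (illustrative only, not compiled;
 * "uobj", "pg", "pps" and "npages" are stand-ins): a simplified
 * object-backed pageout in the style of the pagedaemon.  the real
 * pagedaemon also handles the swap-backed (uobj == NULL) case and all
 * of the return-state rules spelled out above.
 *
 *	struct vm_page **ppsp = pps;
 *	int result;
 *
 *	(on entry: object and page queues locked, pg PG_BUSY + !PG_CLEAN)
 *	result = uvm_pager_put(uobj, pg, &ppsp, &npages,
 *	    PGO_ALLPAGES|PGO_PDFREECLUST, 0, 0);
 *	(page queues now unlocked; per return-state item 3, the object
 *	 comes back locked only because PGO_PDFREECLUST was set and
 *	 only if result != VM_PAGER_PEND)
 */
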
/*
 * uvm_pager_dropcluster: drop a cluster we have built (because we
 * got an error, or, if PGO_PDFREECLUST we are un-busying the
 * cluster pages on behalf of the pagedaemon).
 *
 * => uobj, if non-null, is a non-swap-backed object that is
 *	locked by the caller.   we return with this object still
 *	locked.
 * => page queues are not locked
 * => pg is our page of interest (the one we clustered around, can be null)
 * => ppsp/npages is our current cluster
 * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
 *	pages on behalf of the pagedaemon.
 *           PGO_REALLOCSWAP: drop previously allocated swap slots for
 *		clustered swap-backed pages (except for "pg" if !NULL)
 *		"swblk" is the start of swap alloc (e.g. for ppsp[0])
 *		[only meaningful if swap-backed (uobj == NULL)]
 */


void
uvm_pager_dropcluster(uobj, pg, ppsp, npages, flags, swblk)
	struct uvm_object *uobj;	/* IN */
	struct vm_page *pg, **ppsp;	/* IN, IN/OUT */
	int *npages;			/* IN/OUT */
	int flags;
	int swblk;			/* valid if (uobj == NULL &&
					   PGO_REALLOCSWAP) */
{
	int lcv;
	boolean_t obj_is_alive;
	struct uvm_object *saved_uobj;

	/*
	 * if we need to reallocate swap space for the cluster we are dropping
	 * (true if swap-backed and PGO_REALLOCSWAP) then free the old
	 * allocation now.   save a block for "pg" if it is non-NULL.
	 *
	 * note that we will zap the object's pointer to swap in the "for" loop
	 * below...
	 */

	if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
		if (pg)
			uvm_swap_free(swblk + 1, *npages - 1);
		else
			uvm_swap_free(swblk, *npages);
	}

	/*
	 * drop all pages but "pg"
	 */

	for (lcv = 0 ; lcv < *npages ; lcv++) {

		/* skip "pg" or empty slot */
		if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
			continue;

		/*
		 * if swap-backed, gain lock on object that owns page.  note
		 * that PQ_ANON bit can't change as long as we are holding
		 * the PG_BUSY bit (so there is no need to lock the page
		 * queues to test it).
		 *
		 * once we have the lock, dispose of the pointer to swap, if
		 * requested
		 */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON) {
				simple_lock(&ppsp[lcv]->uanon->an_lock);
				if (flags & PGO_REALLOCSWAP)
					  /* zap swap block */
					  ppsp[lcv]->uanon->an_swslot = 0;
			} else {
				simple_lock(&ppsp[lcv]->uobject->vmobjlock);
				if (flags & PGO_REALLOCSWAP)
					uao_set_swslot(ppsp[lcv]->uobject,
					    ppsp[lcv]->offset >> PAGE_SHIFT, 0);
			}
		}

		/* did someone want the page while we had it busy-locked? */
		if (ppsp[lcv]->flags & PG_WANTED) {
			/* still holding obj lock */
			wakeup(ppsp[lcv]);
		}

		/* if page was released, release it.  otherwise un-busy it */
		if (ppsp[lcv]->flags & PG_RELEASED) {

			if (ppsp[lcv]->pqflags & PQ_ANON) {
				/* so that anfree will free */
				ppsp[lcv]->flags &= ~(PG_BUSY);
				UVM_PAGE_OWN(ppsp[lcv], NULL);

				pmap_page_protect(PMAP_PGARG(ppsp[lcv]),
				    VM_PROT_NONE); /* be safe */
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
				/* kills anon and frees pg */
				uvm_anfree(ppsp[lcv]->uanon);

				continue;
			}

			/*
			 * pgo_releasepg will dump the page for us
			 */

#ifdef DIAGNOSTIC
			if (ppsp[lcv]->uobject->pgops->pgo_releasepg == NULL)
				panic("uvm_pager_dropcluster: no releasepg "
				    "function");
#endif
			saved_uobj = ppsp[lcv]->uobject;
			obj_is_alive =
			    saved_uobj->pgops->pgo_releasepg(ppsp[lcv], NULL);

#ifdef DIAGNOSTIC
			/* for normal objects, "pg" is still PG_BUSY by us,
			 * so obj can't die */
			if (uobj && !obj_is_alive)
				panic("uvm_pager_dropcluster: object died "
				    "with active page");
#endif
			/* only unlock the object if it is still alive...  */
			if (obj_is_alive && saved_uobj != uobj)
				simple_unlock(&saved_uobj->vmobjlock);

			/*
			 * XXXCDC: suppose uobj died in the pgo_releasepg?
			 * how do we pass that info up to the caller?  we
			 * are currently ignoring it...
			 */

			continue;		/* next page */

		} else {
			ppsp[lcv]->flags &= ~(PG_BUSY|PG_WANTED|PG_FAKE);
			UVM_PAGE_OWN(ppsp[lcv], NULL);
		}

		/*
		 * if we are operating on behalf of the pagedaemon and we
		 * had a successful pageout update the page!
		 */
		if (flags & PGO_PDFREECLUST) {
			/* XXX: with PMAP_NEW ref should already be clear,
			 * but don't trust! */
			pmap_clear_reference(PMAP_PGARG(ppsp[lcv]));
			pmap_clear_modify(PMAP_PGARG(ppsp[lcv]));
			ppsp[lcv]->flags |= PG_CLEAN;
		}

		/* if anonymous cluster, unlock object and move on */
		if (!uobj) {
			if (ppsp[lcv]->pqflags & PQ_ANON)
				simple_unlock(&ppsp[lcv]->uanon->an_lock);
			else
				simple_unlock(&ppsp[lcv]->uobject->vmobjlock);
		}

	}

	/*
	 * drop to a cluster of 1 page ("pg") if requested
	 */

	if (pg && (flags & PGO_PDFREECLUST) == 0) {
		/*
		 * if the pageout was not successful, we make a 1 page cluster.
		 */
		ppsp[0] = pg;
		*npages = 1;

		/*
		 * assign new swap block to new cluster, if anon backed
		 */
		if (uobj == NULL && (flags & PGO_REALLOCSWAP)) {
			if (pg->pqflags & PQ_ANON) {
				simple_lock(&pg->uanon->an_lock);
				pg->uanon->an_swslot = swblk;	/* reassign */
				simple_unlock(&pg->uanon->an_lock);
			} else {
				simple_lock(&pg->uobject->vmobjlock);
				uao_set_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT, swblk);
				simple_unlock(&pg->uobject->vmobjlock);
			}
		}
	}
}

/*
 * uvm_aio_biodone: called at interrupt time when an async i/o
 * completes.  queue the finished aio for the aio-done daemon and
 * wake it up.
 */

void
uvm_aio_biodone(bp)
	struct buf *bp;
{
	struct uvm_aiobuf *abp = (void *)bp;
	int s;

	s = splbio();
	simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
	TAILQ_INSERT_TAIL(&uvm.aio_done, &abp->aio, aioq);
	wakeup(&uvm.aiodoned);
	simple_unlock(&uvm.aiodoned_lock);
	splx(s);
}

/*
 * uvm_aio_aiodone: finish up an async i/o: collect the pages, unmap
 * them from pager_map, and un-busy (or, on a failed async read,
 * release) them via uvm_pager_dropcluster().
 */

void
uvm_aio_aiodone(aio)
	struct uvm_aiodesc *aio;
{
	struct uvm_aiobuf *abp = aio->pd_ptr;
	struct vm_page *pgs[aio->npages];
	int s, i;
	boolean_t release;

	release = (abp->buf.b_flags & (B_ERROR|B_READ)) == (B_ERROR|B_READ);
	for (i = 0; i < aio->npages; i++) {
		pgs[i] = uvm_pageratop(aio->kva + (i << PAGE_SHIFT));

		/*
		 * if this is an async read and we got an error,
		 * mark the pages PG_RELEASED so that uvm_pager_dropcluster()
		 * will free them.
		 */

		if (release) {
			pgs[i]->flags |= PG_RELEASED;
		}
	}
	uvm_pagermapout(aio->kva, aio->npages);
	uvm_pager_dropcluster((struct uvm_object *)abp->buf.b_vp, NULL, pgs,
			      &aio->npages, PGO_PDFREECLUST, 0);

	s = splbio();
	pool_put(uvm_aiobuf_pool, abp);
	splx(s);
}
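
/*
 * consumer sketch (illustrative only, not compiled): uvm_aio_biodone()
 * runs at interrupt time and only queues the finished aio and wakes
 * whoever sleeps on &uvm.aiodoned.  the aio-done daemon that drains the
 * queue lives elsewhere; its shape is assumed here, roughly:
 *
 *	struct uvm_aiodesc *aio;
 *	int s;
 *
 *	for (;;) {
 *		s = splbio();
 *		simple_lock(&uvm.aiodoned_lock);
 *		aio = uvm.aio_done.tqh_first;
 *		if (aio == NULL) {
 *			UVM_UNLOCK_AND_WAIT(&uvm.aiodoned,
 *			    &uvm.aiodoned_lock, FALSE, "aiodoned", 0);
 *			splx(s);
 *			continue;
 *		}
 *		TAILQ_REMOVE(&uvm.aio_done, aio, aioq);
 *		simple_unlock(&uvm.aiodoned_lock);
 *		splx(s);
 *		uvm_aio_aiodone(aio);
 *	}
 */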