/*	$NetBSD: uvm_aobj.c,v 1.18.2.1.2.5 1999/08/09 00:05:54 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.   in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))
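
/*
 * a worked example of the macros above (illustrative only; the values
 * follow from UAO_SWHASH_CLUSTER_SHIFT == 4, i.e. 16-page clusters):
 *
 *	pageidx = 0x123
 *	UAO_SWHASH_ELT_TAG(0x123)         == 0x123 >> 4 == 0x12
 *	slot index within the elt         == 0x123 & 0xf == 0x3
 *	UAO_SWHASH_ELT_PAGEIDX_BASE(elt)  == 0x12 << 4   == 0x120
 *
 * UAO_SWHASH_THRESHOLD is 16 * 4 == 64 pages, so an aobj larger than
 * 64 pages (e.g. 256KB with 4KB pages) is managed with the hash table
 * and anything smaller with the flat u_swslots array.
 */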

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};
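
/*
 * because u_obj is the first member, a uvm_object pointer handed to the
 * pager operations below converts back to its enclosing aobj with a
 * simple cast:
 *
 *	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
 *
 * (this is exactly what uao_set_swslot() and friends do).
 */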

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

/*
 * local functions
 */

static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
							  int, boolean_t));
static int			 uao_find_swslot __P((struct uvm_aobj *, int));
static boolean_t		 uao_flush __P((struct uvm_object *,
						voff_t, voff_t, int));
static void			 uao_free __P((struct uvm_aobj *));
static int			 uao_get __P((struct uvm_object *, voff_t,
					      vm_page_t *, int *, int,
					      vm_prot_t, int, int));
static boolean_t		 uao_releasepg __P((struct vm_page *,
						    struct vm_page **));

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uvm_shareprot,		/* shareprot */
	NULL,			/* aiodone */
	uao_releasepg		/* releasepg */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static simple_lock_data_t uao_list_lock;

/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	/* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	for (elt = swhash->lh_first; elt != NULL; elt = elt->list.le_next) {
		if (elt->tag == page_tag)
			return(elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return NULL;

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */
	elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));

	return(elt);
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);	/* no elt, hence no slot */
	}

	/*
	 * otherwise, look in the array
	 */
	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {

		if (slot == 0)
			return(0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
#ifdef DIAGNOSTIC
			if (slot)
				panic("uao_set_swslot: didn't create elt");
#endif
			return (0);
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {		/* freeing slot ... */
			if (oldslot)	/* to be safe */
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
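
/*
 * usage sketch for the slot interface (illustrative, not part of the
 * build): a caller that has just paged a page out records the slot,
 * and later releases it by storing zero:
 *
 *	int old;
 *
 *	old = uao_set_swslot(uobj, pageidx, newslot);	// remember slot
 *	...
 *	old = uao_set_swslot(uobj, pageidx, 0);		// forget slot
 *	if (old)
 *		uvm_swap_free(old, 1);			// free swap space
 *
 * the final pattern is exactly what uao_dropswap() below wraps up.
 */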

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = aobj->u_swhash[i].lh_first; elt != NULL;
			    elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot) {
						uvm_swap_free(slot, 1);

						/*
						 * this page is no longer
						 * only in swap.
						 */
						simple_lock(&uvm.swap_data_lock);
						uvmexp.swpgonly--;
						simple_unlock(&uvm.swap_data_lock);
					}
				}

				next = elt->list.le_next;
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		FREE(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot) {
				uvm_swap_free(slot, 1);

				/* this page is no longer only in swap. */
				simple_lock(&uvm.swap_data_lock);
				uvmexp.swpgonly--;
				simple_unlock(&uvm.swap_data_lock);
			}
		}
		FREE(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	pool_put(&uvm_aobj_pool, aobj);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */
struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
	static int kobj_alloced = 0;			/* not allocated yet */
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		/*
		 * XXXTHORPEJ: Need to call this now, so the pool gets
		 * initialized!
		 */
		uao_init();

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
			panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about locking,
	 * since we are still booting and should be the only thread around.
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			MALLOC(aobj->u_swslots, int *, pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			/* done! */
			return(&aobj->u_obj);
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return(&aobj->u_obj);
}
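
/*
 * typical lifecycle (a minimal sketch using only functions defined in
 * this file; the step of mapping the object into an address space is
 * elided):
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(64 * PAGE_SIZE, 0);	// anon object, 1 reference
 *	uao_reference(uobj);			// e.g. for a second mapping
 *	...
 *	uao_detach(uobj);			// drop the extra reference
 *	uao_detach(uobj);			// last ref: pages+swap freed
 */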

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
void
uao_init()
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", 0, NULL, NULL, M_UVMAOBJ);

	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked (we will lock it);
 *	we just lock it and call the locked version
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 */
void
uao_reference_locked(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
}


/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked; we lock it and call the locked version,
 *	which unlocks (or frees) the object for us
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}


/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 */
void
uao_detach_locked(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;				/* drop ref! */
	if (uobj->uo_refs) {				/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages that aren't PG_BUSY,
	 * mark for release any that are.
	 */
	busybody = FALSE;
	for (pg = uobj->memq.tqh_first ; pg != NULL ; pg = pg->listq.tqe_next) {
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_RELEASED;
			busybody = TRUE;
			continue;
		}

		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * if we found any busy pages, we're done for now.
	 * mark the aobj for death, releasepg will finish up for us.
	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}

/*
 * uao_flush: "flush" pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return TRUE unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * comment on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.  the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.  if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).  also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busied.
 *	XXXJRT We never actually do this, as we are "flushing" anonymous
 *	XXXJRT memory, which doesn't have persistent backing store.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */

#define	UAO_HASH_PENALTY 4	/* XXX: a guess */
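
/*
 * a worked example of the by_list heuristic (illustrative numbers):
 * to flush a 16-page range of an object holding 1000 resident pages,
 * the list walk touches up to 1000 pages while lookups cost only 16
 * hash probes, so we go by hash (1000 > 16 * UAO_HASH_PENALTY).  for
 * the same range in an object with only 50 resident pages, 50 <= 64,
 * and walking memq wins.
 */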

boolean_t
uao_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	voff_t start, stop;
	int flags;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
	struct vm_page *pp, *ppnext;
	boolean_t retval, by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_flush"); UVMHIST_CALLED(maphist);

	curoff = 0;	/* XXX: shut up gcc */

	retval = TRUE;	/* default to success */

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UAO_HASH_PENALTY);
	}

	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%lx, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */
	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		UVMHIST_LOG(maphist,
		    "<- done (no work to do)",0,0,0,0);
		return (retval);
	}

	/*
	 * now do it.  note: we must update ppnext in the body of loop or we
	 * will get stuck.  we need to use ppnext because we may free "pp"
	 * before doing the next loop.
	 */

	if (by_list) {
		pp = uobj->memq.tqh_first;
	} else {
		curoff = start;
		pp = uvm_pagelookup(uobj, curoff);
	}

	ppnext = NULL;	/* XXX: shut up gcc */
	uvm_lock_pageq();	/* page queues locked */

	/* locked: both page queues and uobj */
	for ( ; (by_list && pp != NULL) ||
	    (!by_list && curoff < stop) ; pp = ppnext) {
		if (by_list) {
			ppnext = pp->listq.tqe_next;

			/* range check */
			if (pp->offset < start || pp->offset >= stop)
				continue;
		} else {
			curoff += PAGE_SIZE;
			if (curoff < stop)
				ppnext = uvm_pagelookup(uobj, curoff);

			/* null check */
			if (pp == NULL)
				continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */
		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
			/* skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/* zap all mappings for the page. */
			pmap_page_protect(PMAP_PGARG(pp),
			    VM_PROT_NONE);

			/* ...and deactivate the page. */
			uvm_pagedeactivate(pp);

			continue;

		case PGO_FREE:
			/* XXX skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/*
			 * mark the page as released if it's busy.
			 */
			if (pp->flags & PG_BUSY) {
				pp->flags |= PG_RELEASED;
				continue;
			}

			/* zap all mappings for the page. */
			pmap_page_protect(PMAP_PGARG(pp),
			    VM_PROT_NONE);

			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
			uvm_pagefree(pp);

			continue;

		default:
			panic("uao_flush: weird flags");
		}
#ifdef DIAGNOSTIC
		panic("uao_flush: unreachable code");
#endif
	}

	uvm_unlock_pageq();

	UVMHIST_LOG(maphist,
	    "<- done, rv=%d",retval,0,0,0);
	return (retval);
}
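
/*
 * example invocation (a sketch; uao_flush is static, so callers outside
 * this file reach it through the pager op): to throw away every resident
 * page and swap slot of an aobj while holding its lock,
 *
 *	simple_lock(&uobj->vmobjlock);
 *	(void) uobj->pgops->pgo_flush(uobj, 0, 0, PGO_FREE|PGO_ALLPAGES);
 *	simple_unlock(&uobj->vmobjlock);
 *
 * the start/stop values are ignored because PGO_ALLPAGES overrides them.
 */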

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
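/*
 * the caller-side protocol is two-pass (a sketch of the idiom, not a
 * verbatim copy of the fault code; real callers go through
 * uobj->pgops->pgo_get): first try with PGO_LOCKED while holding the
 * fault data structures; if that returns VM_PAGER_UNLOCK, drop the
 * locks and retry so that we may sleep or do swap I/O:
 *
 *	npages = 1;
 *	pps[0] = NULL;
 *	rv = uao_get(uobj, offset, pps, &npages, 0 / * centeridx * /,
 *	    VM_PROT_READ, 0 / * advice * /, PGO_LOCKED);
 *	if (rv == VM_PAGER_UNLOCK) {
 *		// unlock the fault data structures, then ...
 *		rv = uao_get(uobj, offset, pps, &npages, 0,
 *		    VM_PROT_READ, 0, 0);
 *	}
 */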
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	voff_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
		    aobj, offset, flags,0);

	/*
	 * get number of pages
	 */
	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {
		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page, then
			 * zero-fill it.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					UVM_PAGE_OWN(ptmp, NULL);
					uvm_pagezero(ptmp);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0) {
					/* need to do a wait or I/O! */
					done = FALSE;
				}
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;
		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return(VM_PAGER_OK);
		else
			/* EEK!   Need to unlock and I/O */
			return(VM_PAGER_UNLOCK);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {
		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    FALSE, "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.   nothing more to do except go to the next page.
		 */
		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */
		swslot = uao_find_swslot(aobj, current_offset >> PAGE_SHIFT);

		/*
		 * just zero the page if there's nothing in swap.
		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			     swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != VM_PAGER_OK) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    rv,0,0,0);
				if (ptmp->flags & PG_WANTED)
					/* object lock still held */
					wakeup(ptmp);
				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();

				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
		pmap_clear_modify(PMAP_PGARG(ptmp));	/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}

/*
 * uao_releasepg: handle released page in an aobj
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *      to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *      killed the page's object.    if we return TRUE, then we
 *      return with the object locked.
 * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return
 *                              with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the aobj if it is not referenced and we are supposed to
 *      kill it ("KILLME").
 */
static boolean_t
uao_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;

#ifdef DIAGNOSTIC
	if ((pg->flags & PG_RELEASED) == 0)
		panic("uao_releasepg: page not released!");
#endif

	/*
	 * dispose of the page [caller handles PG_WANTED] and swap slot.
	 */
	pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
	uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = pg->pageq.tqe_next;	/* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();		/* keep locked for daemon */

	/*
	 * if we're not killing the object, we're done.
	 */
	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.uo_refs)
		panic("uao_releasepg: kill flag set on referenced object!");
#endif

	/*
	 * if there are still pages in the object, we're done for now.
	 */
	if (aobj->u_obj.uo_npages != 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.memq.tqh_first)
		panic("uao_releasepg: pages in object with npages == 0");
#endif

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);

	return FALSE;
}

/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(uobj, pageidx)
	struct uvm_object *uobj;
	int pageidx;
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}