/*	$NetBSD: uvm_aobj.c,v 1.28 2000/03/26 20:54:46 kleink Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq (at) chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */



#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.   in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])
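
/*
 * a worked example (for illustration only): with the cluster shift of
 * 4, page index 0x53 decomposes as
 *
 *	UAO_SWHASH_ELT_TAG(0x53)             == 0x5	(0x53 >> 4)
 *	0x53 & (UAO_SWHASH_CLUSTER_SIZE - 1) == 0x3	(slot within the elt)
 *
 * so page indexes 0x50..0x5f all share the uao_swhash_elt tagged 0x5,
 * and UAO_SWHASH_HASH() masks the same shifted value with u_swhashmask
 * to pick the bucket that elt hangs on.
 */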

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))
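
/*
 * e.g. (illustrative arithmetic, assuming 4k pages): the threshold is
 * 16 * 4 == 64 pages (256kB), so any larger aobj uses the hash table,
 * and a 1024-page aobj would ask for min(1024 >> 4, 256) == 64 buckets.
 */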


/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};
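
/*
 * because u_obj is the first member, the pager functions below recover
 * the aobj from the uvm_object they are handed with a plain cast, e.g.:
 *
 *	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
 */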

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

/*
 * local functions
 */

static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
							  int, boolean_t));
static int			 uao_find_swslot __P((struct uvm_aobj *, int));
static boolean_t		 uao_flush __P((struct uvm_object *,
						voff_t, voff_t, int));
static void			 uao_free __P((struct uvm_aobj *));
static int			 uao_get __P((struct uvm_object *, voff_t,
					      vm_page_t *, int *, int,
					      vm_prot_t, int, int));
static boolean_t		 uao_releasepg __P((struct vm_page *,
						    struct vm_page **));
static boolean_t		 uao_pagein __P((struct uvm_aobj *, int, int));
static boolean_t		 uao_pagein_page __P((struct uvm_aobj *, int));



/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* asyncget */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uvm_shareprot,		/* shareprot */
	NULL,			/* aiodone */
	uao_releasepg		/* releasepg */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static simple_lock_data_t uao_list_lock;


/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	/* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	for (elt = swhash->lh_first; elt != NULL; elt = elt->list.le_next) {
		if (elt->tag == page_tag)
			return(elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return NULL;


	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */
	elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));

	return(elt);
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */
	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {

		if (slot == 0)
			return(0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
	    panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
#ifdef DIAGNOSTIC
			if (slot)
				panic("uao_set_swslot: didn't create elt");
#endif
			return (0);
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {		/* freeing slot ... */
			if (oldslot)	/* to be safe */
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}

	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
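
/*
 * note the return-the-old-slot idiom above: a caller that wants to
 * release the swap space behind a page swaps in zero and frees what
 * came back, e.g. (this is exactly what uao_dropswap() below does):
 *
 *	slot = uao_set_swslot(uobj, pageidx, 0);
 *	if (slot)
 *		uvm_swap_free(slot, 1);
 */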

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	simple_unlock(&aobj->u_obj.vmobjlock);

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot) {
						uvm_swap_free(slot, 1);

						/*
						 * this page is no longer
						 * only in swap.
						 */
						simple_lock(&uvm.swap_data_lock);
						uvmexp.swpgonly--;
						simple_unlock(&uvm.swap_data_lock);
					}
				}

				next = LIST_NEXT(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		FREE(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot) {
				uvm_swap_free(slot, 1);

				/* this page is no longer only in swap. */
				simple_lock(&uvm.swap_data_lock);
				uvmexp.swpgonly--;
				simple_unlock(&uvm.swap_data_lock);
			}
		}
		FREE(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	pool_put(&uvm_aobj_pool, aobj);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */
struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
	static int kobj_alloced = 0;			/* not allocated yet */
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
		    panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
 	 * allocate hash/array if necessary
 	 *
 	 * note: in the KERNSWAP case there is no need to worry about locking,
 	 * since we are still booting and should be the only thread around.
 	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			MALLOC(aobj->u_swslots, int *, pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
			/* done! */
		}
	}

	/*
 	 * init aobj fields
 	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
 	 * now that aobj is ready, add it to the global list
 	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
 	 * done!
 	 */
	return(&aobj->u_obj);
}
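
/*
 * typical use, as a sketch (not from the original file): a normal
 * anonymous object is created with flags == 0 and its reference is
 * dropped with uao_detach() when no longer needed:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(64 * PAGE_SIZE, 0);
 *	...
 *	uao_detach(uobj);
 */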



/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
void
uao_init()
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", 0, NULL, NULL, M_UVMAOBJ);

	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */
void
uao_reference_locked(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
 	 * kernel_object already has plenty of references, leave it alone.
 	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
		    uobj, uobj->uo_refs,0,0);
}


/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}


/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */
void
uao_detach_locked(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
 	 * detaching from kernel_object is a noop.
 	 */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;				/* drop ref! */
	if (uobj->uo_refs) {				/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
 	 * remove the aobj from the global list.
 	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
 	 * free all the pages that aren't PG_BUSY,
	 * mark for release any that are.
 	 */
	busybody = FALSE;
	for (pg = TAILQ_FIRST(&uobj->memq);
	     pg != NULL;
	     pg = TAILQ_NEXT(pg, listq)) {
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_RELEASED;
			busybody = TRUE;
			continue;
		}

		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(pg, VM_PROT_NONE);
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
 	 * if we found any busy pages, we're done for now.
 	 * mark the aobj for death, releasepg will finish up for us.
 	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
 	 * finally, free the rest.
 	 */
	uao_free(aobj);
}

/*
 * uao_flush: "flush" pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return TRUE unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * comment on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.  the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.  if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).  also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busied.
 *	XXXJRT We never actually do this, as we are "flushing" anonymous
 *	XXXJRT memory, which doesn't have persistent backing store.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */

#define	UAO_HASH_PENALTY 4	/* XXX: a guess */
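
/*
 * for example (illustration only): flushing a 16-page range costs
 * 16 * 4 == 64 "penalty units", so an object with more than 64
 * resident pages is walked by doing a hash lookup per offset, while
 * one with 64 or fewer is walked via its memq list.
 */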

boolean_t
uao_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	voff_t start, stop;
	int flags;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
	struct vm_page *pp, *ppnext;
	boolean_t retval, by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_flush"); UVMHIST_CALLED(maphist);

	curoff = 0;	/* XXX: shut up gcc */

	retval = TRUE;	/* default to success */

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UAO_HASH_PENALTY);
	}

	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */
	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		UVMHIST_LOG(maphist,
		    "<- done (no work to do)",0,0,0,0);
		return (retval);
	}

	/*
	 * now do it.  note: we must update ppnext in the body of the loop
	 * or we will get stuck.  we need to use ppnext because we may free
	 * "pp" before doing the next loop iteration.
	 */

	if (by_list) {
		pp = uobj->memq.tqh_first;
	} else {
		curoff = start;
		pp = uvm_pagelookup(uobj, curoff);
	}

	ppnext = NULL;	/* XXX: shut up gcc */
	uvm_lock_pageq();	/* page queues locked */

	/* locked: both page queues and uobj */
	for ( ; (by_list && pp != NULL) ||
	    (!by_list && curoff < stop) ; pp = ppnext) {
		if (by_list) {
			ppnext = pp->listq.tqe_next;

			/* range check */
			if (pp->offset < start || pp->offset >= stop)
				continue;
		} else {
			curoff += PAGE_SIZE;
			if (curoff < stop)
				ppnext = uvm_pagelookup(uobj, curoff);

			/* null check */
			if (pp == NULL)
				continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */
		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			/* skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/* zap all mappings for the page. */
			pmap_page_protect(pp, VM_PROT_NONE);

			/* ...and deactivate the page. */
			uvm_pagedeactivate(pp);

			continue;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */
			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/* XXX skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/*
			 * mark the page as released if it's busy.
			 */
			if (pp->flags & PG_BUSY) {
				pp->flags |= PG_RELEASED;
				continue;
			}

			/* zap all mappings for the page. */
			pmap_page_protect(pp, VM_PROT_NONE);

			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
			uvm_pagefree(pp);

			continue;

		default:
			panic("uao_flush: weird flags");
		}
#ifdef DIAGNOSTIC
		panic("uao_flush: unreachable code");
#endif
	}

	uvm_unlock_pageq();

	UVMHIST_LOG(maphist,
	    "<- done, rv=%d",retval,0,0,0);
	return (retval);
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	voff_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv, pageidx;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
		    aobj, offset, flags,0);

	/*
 	 * get number of pages
 	 */
	maxpages = *npagesp;

	/*
 	 * step 1: handle the case where fault data structures are locked.
 	 */

	if (flags & PGO_LOCKED) {
		/*
 		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
 		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
 			 * if page is new, attempt to allocate the page, then
			 * zero-fill it.
 			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					UVM_PAGE_OWN(ptmp, NULL);
					uvm_pagezero(ptmp);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0) {
					/* need to do a wait or I/O! */
					done = FALSE;
				}
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
 		 * step 1b: now we've either done everything we need to, or
		 * we need to unlock and do some waiting or I/O.
 		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return(VM_PAGER_OK);
		else
			/* EEK!   Need to unlock and I/O */
			return(VM_PAGER_UNLOCK);
	}

	/*
 	 * step 2: get non-resident or busy pages.
 	 * object is locked.   data structures are unlocked.
 	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
 		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
 		 *
 		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
 		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    FALSE, "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
 			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
 			 */
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
 		 * if we own the valid page at the correct offset, pps[lcv] will
 		 * point to it.   nothing more to do except go to the next page.
 		 */
		if (pps[lcv])
			continue;			/* next lcv */

		/*
 		 * we have a "fake/busy/clean" page that we just allocated.
 		 * do the needed "i/o", either reading from swap or zeroing.
 		 */
		swslot = uao_find_swslot(aobj, pageidx);

		/*
 		 * just zero the page if there's nothing in swap.
 		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			     swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != VM_PAGER_OK) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    rv,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */
				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
							SWSLOT_BAD);
				uvm_swap_markbad(swslot, 1);

				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();

				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
 		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
 		 *
 		 * it is the caller's job to:
 		 * => check if the page is released
 		 * => unbusy the page
 		 * => activate the page
 		 */

		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
		pmap_clear_modify(ptmp);		/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
 	 * finally, unlock object and return.
 	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}
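
/*
 * a minimal synchronous fetch, sketched (uao_pagein_page() below does
 * essentially this):
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *
 *	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT, &pg, &npages,
 *	    0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
 *
 * the object must be locked on entry and is unlocked on return; the
 * caller checks rv and must un-busy (and may activate) the page.
 */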

/*
 * uao_releasepg: handle released page in an aobj
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *      to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *      killed the page's object.    if we return TRUE, then we
 *      return with the object locked.
 * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return
 *                              with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the aobj if it is not referenced and we are supposed to
 *      kill it ("KILLME").
 */
static boolean_t
uao_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;

#ifdef DIAGNOSTIC
	if ((pg->flags & PG_RELEASED) == 0)
		panic("uao_releasepg: page not released!");
#endif

	/*
 	 * dispose of the page [caller handles PG_WANTED] and swap slot.
 	 */
	pmap_page_protect(pg, VM_PROT_NONE);
	uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = pg->pageq.tqe_next;	/* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();		/* keep locked for daemon */

	/*
 	 * if we're not killing the object, we're done.
 	 */
	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.uo_refs)
		panic("uao_releasepg: kill flag set on referenced object!");
#endif

	/*
 	 * if there are still pages in the object, we're done for now.
 	 */
	if (aobj->u_obj.uo_npages != 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&aobj->u_obj.memq))
		panic("uao_releasepg: pages in object with npages == 0");
#endif

	/*
 	 * finally, free the rest.
 	 */
	uao_free(aobj);

	return FALSE;
}


/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(uobj, pageidx)
	struct uvm_object *uobj;
	int pageidx;
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}


/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
boolean_t
uao_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_aobj *aobj, *nextaobj;

	/*
	 * walk the list of all aobjs.
	 */

restart:
	simple_lock(&uao_list_lock);

	for (aobj = LIST_FIRST(&uao_list);
	     aobj != NULL;
	     aobj = nextaobj) {
		boolean_t rv;

		/*
		 * try to get the object lock,
		 * start all over if we fail.
		 * most of the time we'll get the aobj lock,
		 * so this should be a rare case.
		 */
		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
			simple_unlock(&uao_list_lock);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */
		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */
		simple_unlock(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */
		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */
		simple_lock(&uao_list_lock);
		nextaobj = LIST_NEXT(aobj, u_list);
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	simple_unlock(&uao_list_lock);
	return FALSE;
}


/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
static boolean_t
uao_pagein(aobj, startslot, endslot)
	struct uvm_aobj *aobj;
	int startslot, endslot;
{
	boolean_t rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int bucket;

restart:
		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */
					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page, then start
					 * over on this object, since the
					 * swhash elt may have been freed.
					 */
					rv = uao_pagein_page(aobj,
					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */
			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */
			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return FALSE;
}

/*
 * page in a page from an aobj.  used for swap_off.
 * returns TRUE if pagein was aborted due to lack of memory.
 *
 * => aobj must be locked and is returned locked.
 */
static boolean_t
uao_pagein_page(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{
	struct vm_page *pg;
	int rv, slot, npages;
	UVMHIST_FUNC("uao_pagein_page");  UVMHIST_CALLED(pdhist);

	pg = NULL;
	npages = 1;
	/* locked: aobj */
	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
		     &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
	/* unlocked: aobj */

	/*
	 * relock and finish up.
	 */
	simple_lock(&aobj->u_obj.vmobjlock);

	switch (rv) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_ERROR:
	case VM_PAGER_REFAULT:
		/*
		 * nothing more to do on errors.
		 * VM_PAGER_REFAULT can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */
		return FALSE;

#ifdef DIAGNOSTIC
	default:
		panic("uao_pagein_page: uao_get -> %d\n", rv);
#endif
	}

#ifdef DIAGNOSTIC
	/*
	 * this should never happen, since we have a reference on the aobj.
	 */
	if (pg->flags & PG_RELEASED) {
		panic("uao_pagein_page: found PG_RELEASED page?\n");
	}
#endif

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
	uvm_swap_free(slot, 1);
	pg->flags &= ~(PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	/*
	 * deactivate the page (to put it on a page queue).
	 */
	pmap_clear_reference(pg);
	pmap_page_protect(pg, VM_PROT_NONE);
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	return FALSE;
}