/*	$NetBSD: uvm_aobj.c,v 1.38 2001/01/28 23:30:42 thorpej Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.   in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])
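
/*
 * a worked example of the macros above, assuming the default cluster
 * shift of 4: page index 0x37 has tag 0x3 (0x37 >> 4) and lives in
 * slot 0x7 (0x37 & 0xf) of its element; the element itself hangs off
 * bucket u_swhash[0x3 & u_swhashmask], and its pageidx base is 0x30
 * (0x3 << 4).
 */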

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))
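
/*
 * illustrative numbers (assuming 4k pages): with the defaults above
 * the threshold is 64 pages, so any aobj larger than 256kb uses the
 * hash.  a 1gb aobj has 262144 pages, and UAO_SWHASH_BUCKETS clamps
 * 262144 >> 4 == 16384 down to the 256-bucket maximum.
 */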

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};
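
/*
 * note: each element covers a UAO_SWHASH_CLUSTER_SIZE-page (16 with
 * the default shift) window of the object; "count" tracks how many
 * of its slots are nonzero, so the element can be freed as soon as
 * the last slot in its window is cleared (see uao_set_swslot).
 */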

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

/*
 * local functions
 */
static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
							  int, boolean_t));
static int			 uao_find_swslot __P((struct uvm_aobj *, int));
static boolean_t		 uao_flush __P((struct uvm_object *,
						voff_t, voff_t, int));
static void			 uao_free __P((struct uvm_aobj *));
static int			 uao_get __P((struct uvm_object *, voff_t,
					      vm_page_t *, int *, int,
					      vm_prot_t, int, int));
static boolean_t		 uao_releasepg __P((struct vm_page *,
						    struct vm_page **));
static boolean_t		 uao_pagein __P((struct uvm_aobj *, int, int));
static boolean_t		 uao_pagein_page __P((struct uvm_aobj *, int));

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uao_releasepg		/* releasepg */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static simple_lock_data_t uao_list_lock;

/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	/* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag)
			return(elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return NULL;

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */
	elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));

	return(elt);
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */
	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {

		if (slot == 0)
			return(0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
	    panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
#ifdef DIAGNOSTIC
			if (slot)
				panic("uao_set_swslot: didn't create elt");
#endif
			return (0);
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {		/* freeing slot ... */
			if (oldslot)	/* to be safe */
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
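
/*
 * the usual calling pattern, sketched: to release a page's swap
 * resources a caller clears the slot and frees whatever was there
 * (this is exactly what uao_dropswap() below does):
 *
 *	slot = uao_set_swslot(uobj, pageidx, 0);
 *	if (slot)
 *		uvm_swap_free(slot, 1);
 */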

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	simple_unlock(&aobj->u_obj.vmobjlock);

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot == 0) {
						continue;
					}
					uvm_swap_free(slot, 1);

					/*
					 * this page is no longer
					 * only in swap.
					 */
					simple_lock(&uvm.swap_data_lock);
					uvmexp.swpgonly--;
					simple_unlock(&uvm.swap_data_lock);
				}

				next = LIST_NEXT(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		free(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot) {
				uvm_swap_free(slot, 1);

				/* this page is no longer only in swap. */
				simple_lock(&uvm.swap_data_lock);
				uvmexp.swpgonly--;
				simple_unlock(&uvm.swap_data_lock);
			}
		}
		free(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	pool_put(&uvm_aobj_pool, aobj);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */
struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
	static int kobj_alloced = 0;			/* not allocated yet */
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
		    panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about locking
	 * since we are still booting and should be the only thread around.
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = malloc(pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
			/* done! */
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return(&aobj->u_obj);
}
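
/*
 * a hypothetical (illustrative) caller of the normal, non-kernel
 * case: create an anonymous object big enough for 16 pages, use it,
 * then drop the single reference uao_create() returned with:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(16 * PAGE_SIZE, 0);
 *	...
 *	uao_detach(uobj);
 */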

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
void
uao_init()
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", 0, NULL, NULL, M_UVMAOBJ);

	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */
void
uao_reference_locked(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
		    uobj, uobj->uo_refs,0,0);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}

/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */
void
uao_detach_locked(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;				/* drop ref! */
	if (uobj->uo_refs) {				/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages that aren't PG_BUSY,
	 * mark for release any that are.
	 */
	busybody = FALSE;
	for (pg = TAILQ_FIRST(&uobj->memq);
	     pg != NULL;
	     pg = TAILQ_NEXT(pg, listq)) {
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_RELEASED;
			busybody = TRUE;
			continue;
		}

		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(pg, VM_PROT_NONE);
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * if we found any busy pages, we're done for now.
	 * mark the aobj for death, releasepg will finish up for us.
	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}

/*
 * uao_flush: "flush" pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGE is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return TRUE unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * comment on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.  the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.  if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).  also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busied.
 *	XXXJRT We never actually do this, as we are "flushing" anonymous
 *	XXXJRT memory, which doesn't have persistent backing store.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */

#define	UAO_HASH_PENALTY 4	/* XXX: a guess */
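
/*
 * worked example of the penalty: flushing a 100-page range of an
 * object with 1000 resident pages compares 1000 against 100 * 4 ==
 * 400; since 1000 > 400 we do 100 hash lookups instead of walking
 * all 1000 entries of memq.
 */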

boolean_t
uao_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	voff_t start, stop;
	int flags;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
	struct vm_page *pp, *ppnext;
	boolean_t retval, by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_flush"); UVMHIST_CALLED(maphist);

	curoff = 0;	/* XXX: shut up gcc */

	retval = TRUE;	/* default to success */

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UAO_HASH_PENALTY);
	}

	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */
	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		UVMHIST_LOG(maphist,
		    "<- done (no work to do)",0,0,0,0);
		return (retval);
	}

	/*
	 * now do it.  note: we must update ppnext in the body of the loop or
	 * we will get stuck.  we need to use ppnext because we may free "pp"
	 * before doing the next loop.
	 */

	if (by_list) {
		pp = TAILQ_FIRST(&uobj->memq);
	} else {
		curoff = start;
		pp = uvm_pagelookup(uobj, curoff);
	}

	ppnext = NULL;	/* XXX: shut up gcc */
	uvm_lock_pageq();	/* page queues locked */

	/* locked: both page queues and uobj */
	for ( ; (by_list && pp != NULL) ||
	    (!by_list && curoff < stop) ; pp = ppnext) {
		if (by_list) {
			ppnext = TAILQ_NEXT(pp, listq);

			/* range check */
			if (pp->offset < start || pp->offset >= stop)
				continue;
		} else {
			curoff += PAGE_SIZE;
			if (curoff < stop)
				ppnext = uvm_pagelookup(uobj, curoff);

			/* null check */
			if (pp == NULL)
				continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */
		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			/* skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/* ...and deactivate the page. */
			pmap_clear_reference(pp);
			uvm_pagedeactivate(pp);

			continue;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */
			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/* XXX skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/*
			 * mark the page as released if it's busy.
			 */
			if (pp->flags & PG_BUSY) {
				pp->flags |= PG_RELEASED;
				continue;
			}

			/* zap all mappings for the page. */
			pmap_page_protect(pp, VM_PROT_NONE);

			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
			uvm_pagefree(pp);

			continue;

		default:
			panic("uao_flush: weird flags");
		}
#ifdef DIAGNOSTIC
		panic("uao_flush: unreachable code");
#endif
	}

	uvm_unlock_pageq();

	UVMHIST_LOG(maphist,
	    "<- done, rv=%d",retval,0,0,0);
	return (retval);
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
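/*
 * e.g. in a typical fault the first call comes in with PGO_LOCKED
 * set; if the center page turns out to be swapped out (case 3) we
 * return VM_PAGER_UNLOCK, and the caller retries without PGO_LOCKED
 * so that step 2 below can block and do the swap I/O.
 */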
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	voff_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv, pageidx;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
		    aobj, offset, flags,0);

	/*
	 * get number of pages
	 */
	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {
		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					UVM_PAGE_OWN(ptmp, NULL);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return(VM_PAGER_OK);
		else
			/* EEK!   Need to unlock and I/O */
			return(VM_PAGER_UNLOCK);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    FALSE, "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.   nothing more to do except go to the next page.
		 */
		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */
		swslot = uao_find_swslot(aobj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			     swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != VM_PAGER_OK) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    rv,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */
				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
							SWSLOT_BAD);
				uvm_swap_markbad(swslot, 1);

				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();

				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
		pmap_clear_modify(ptmp);		/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}

/*
 * uao_releasepg: handle released page in an aobj
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *      to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *      killed the page's object.    if we return TRUE, then we
 *      return with the object locked.
 * => if (nextpgp != NULL) => we return the next page on the queue, and return
 *                              with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the aobj if it is not referenced and we are supposed to
 *      kill it ("KILLME").
 */
static boolean_t
uao_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;

#ifdef DIAGNOSTIC
	if ((pg->flags & PG_RELEASED) == 0)
		panic("uao_releasepg: page not released!");
#endif

	/*
	 * dispose of the page [caller handles PG_WANTED] and swap slot.
	 */
	pmap_page_protect(pg, VM_PROT_NONE);
	uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = TAILQ_NEXT(pg, pageq); /* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();		/* keep locked for daemon */

	/*
	 * if we're not killing the object, we're done.
	 */
	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
		return TRUE;
	KASSERT(aobj->u_obj.uo_refs == 0);

	/*
	 * if there are still pages in the object, we're done for now.
	 */
	if (aobj->u_obj.uo_npages != 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&aobj->u_obj.memq))
		panic("uao_releasepg: pages in object with npages == 0");
#endif

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);

	return FALSE;
}

/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(uobj, pageidx)
	struct uvm_object *uobj;
	int pageidx;
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * uao_swap_off: page in every page in every aobj that is paged-out to
 * a range of swslots.
 *
 * => nothing should be locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
boolean_t
uao_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_aobj *aobj, *nextaobj;

	/*
	 * walk the list of all aobjs.
	 */

restart:
	simple_lock(&uao_list_lock);

	for (aobj = LIST_FIRST(&uao_list);
	     aobj != NULL;
	     aobj = nextaobj) {
		boolean_t rv;

		/*
		 * try to get the object lock,
		 * start all over if we fail.
		 * most of the time we'll get the aobj lock,
		 * so this should be a rare case.
		 */
		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
			simple_unlock(&uao_list_lock);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */
		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */
		simple_unlock(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */
		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */
		simple_lock(&uao_list_lock);
		nextaobj = LIST_NEXT(aobj, u_list);
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	simple_unlock(&uao_list_lock);
	return FALSE;
}

/*
 * uao_pagein: page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
static boolean_t
uao_pagein(aobj, startslot, endslot)
	struct uvm_aobj *aobj;
	int startslot, endslot;
{
	boolean_t rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int bucket;

restart:
		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */
					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */
					rv = uao_pagein_page(aobj,
					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */
			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */
			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return FALSE;
}

/*
 * uao_pagein_page: page in a page from an aobj.  used for swap_off.
 * returns TRUE if pagein was aborted due to lack of memory.
 *
 * => aobj must be locked and is returned locked.
 */
static boolean_t
uao_pagein_page(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{
	struct vm_page *pg;
	int rv, slot, npages;

	pg = NULL;
	npages = 1;
	/* locked: aobj */
	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
		     &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
	/* unlocked: aobj */

	/*
	 * relock and finish up.
	 */
	simple_lock(&aobj->u_obj.vmobjlock);

	switch (rv) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_ERROR:
	case VM_PAGER_REFAULT:
		/*
		 * nothing more to do on errors.
		 * VM_PAGER_REFAULT can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */
		return FALSE;

#ifdef DIAGNOSTIC
	default:
		panic("uao_pagein_page: uao_get -> %d\n", rv);
#endif
	}

#ifdef DIAGNOSTIC
	/*
	 * this should never happen, since we have a reference on the aobj.
	 */
	if (pg->flags & PG_RELEASED) {
		panic("uao_pagein_page: found PG_RELEASED page?\n");
	}
#endif

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
	uvm_swap_free(slot, 1);
	pg->flags &= ~(PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	/*
	 * deactivate the page (to put it on a page queue).
	 */
	pmap_clear_reference(pg);
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	return FALSE;
}