/*	$NetBSD: uvm_aobj.c,v 1.17 1999/03/25 18:48:49 mrg Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq (at) chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */



#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.   in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])
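
/*
 * worked example (illustration only, not part of the original source):
 * with UAO_SWHASH_CLUSTER_SHIFT == 4, page index 0x123 falls in the
 * cluster tagged UAO_SWHASH_ELT_TAG(0x123) == 0x123 >> 4 == 0x12, and
 * its slot within that cluster's elt is
 * UAO_SWHASH_ELT_PAGESLOT(elt, 0x123) == elt->slots[0x123 & 0xf]
 * == elt->slots[3].  UAO_SWHASH_HASH(aobj, 0x123) then picks bucket
 * (0x12 & u_swhashmask) of u_swhash, which is where that elt lives.
 */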

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))
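
/*
 * sizing example (illustration only): with the values above, the
 * threshold is 16 * 4 == 64 pages.  an aobj of 256 pages (1MB with
 * 4k pages) exceeds it, so its swap slots go into a hash table sized
 * from min(256 >> 4, 256) == 16 buckets; a 64-page aobj sits at the
 * threshold and keeps its slots in a flat 64-entry array instead.
 */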


/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	vaddr_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};
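
/*
 * note (illustration only): because u_obj is the first member, a
 * pointer to the aobj and a pointer to its embedded uvm_object refer
 * to the same address, so the pager functions below can recover the
 * aobj from the uvm_object they are handed with a simple cast:
 *
 *	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
 */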

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

/*
 * local functions
 */

static void			 uao_init __P((void));
static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
							  int, boolean_t));
static int			 uao_find_swslot __P((struct uvm_aobj *,
						      int));
static boolean_t		 uao_flush __P((struct uvm_object *,
						vaddr_t, vaddr_t,
						int));
static void			 uao_free __P((struct uvm_aobj *));
static int			 uao_get __P((struct uvm_object *, vaddr_t,
					      vm_page_t *, int *, int,
					      vm_prot_t, int, int));
static boolean_t		 uao_releasepg __P((struct vm_page *,
						    struct vm_page **));



/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	uao_init,		/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* asyncget */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uvm_shareprot,		/* shareprot */
	NULL,			/* aiodone */
	uao_releasepg		/* releasepg */
};
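
/*
 * dispatch sketch (an assumption for illustration; the member names
 * are declared in uvm_pager.h, not in this file): generic uvm code
 * reaches these functions indirectly through the object's pgops
 * pointer, along the lines of:
 *
 *	rv = uobj->pgops->pgo_get(uobj, offset, pps, &npages,
 *	    centeridx, access_type, advice, flags);
 *
 * so callers page an aobj without knowing which pager backs it.
 */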

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static simple_lock_data_t uao_list_lock;


/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	int page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	/* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	for (elt = swhash->lh_first; elt != NULL; elt = elt->list.le_next) {
		if (elt->tag == page_tag)
			return(elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return NULL;


	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */
	elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));

	return(elt);
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */
	return(aobj->u_swslots[pageidx]);
}
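
/*
 * usage sketch (illustration only): fault handling asks whether a
 * page was ever swapped out before deciding to zero-fill, just as
 * uao_get() does below:
 *
 *	if (uao_find_swslot(aobj, pageidx) == 0)
 *		uvm_pagezero(ptmp);
 *
 * a zero return means the page never hit swap and can be zero-filled.
 */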

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {

		if (slot == 0)
			return(0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
#ifdef DIAGNOSTIC
			if (slot)
				panic("uao_set_swslot: didn't create elt");
#endif
			return (0);
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {		/* freeing slot ... */
			if (oldslot)	/* to be safe */
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}

	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
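
/*
 * usage sketch (taken from the callers below): to retire a page,
 * clear its slot and free any swap space the old slot held:
 *
 *	slot = uao_set_swslot(&aobj->u_obj, pg->offset >> PAGE_SHIFT, 0);
 *	if (slot)
 *		uvm_swap_free(slot, 1);
 *
 * uao_detach() and uao_releasepg() both follow this pattern.
 */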

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = aobj->u_swhash[i].lh_first; elt != NULL;
			    elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot)
						uvm_swap_free(slot, 1);
				}

				next = elt->list.le_next;
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		FREE(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot)
				uvm_swap_free(slot, 1);
		}
		FREE(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	pool_put(&uvm_aobj_pool, aobj);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object (can only
 *	    happen once)
 */
struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store;	/* home of kernel_object */
	static int kobj_alloced = 0;			/* not allocated yet */
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		/*
		 * XXXTHORPEJ: Need to call this now, so the pool gets
		 * initialized!
		 */
		uao_init();

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
			panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case no need to worry about locking since
	 * we are still booting and should be the only thread around.
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			MALLOC(aobj->u_swslots, int *, pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);	/* done! */
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 * XXXCHS: uao_init hasn't been called in the KERNOBJ case,
	 * do we really need the kernel object on this list anyway?
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return(&aobj->u_obj);
}
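
/*
 * usage sketch (illustration only; the size is made up): a normal
 * anonymous object is created with zero flags and released with
 * uao_detach() when its last reference is dropped:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(64 * PAGE_SIZE, 0);
 *	...
 *	uao_detach(uobj);
 */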



/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
static void
uao_init()
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", 0, NULL, NULL, M_UVMAOBJ);

	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked (we will lock it)
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (uobj->uo_refs == UVM_OBJ_KERN)
		return;

	simple_lock(&uobj->vmobjlock);
	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked, we will lock it
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (uobj->uo_refs == UVM_OBJ_KERN)
		return;

	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;				/* drop ref! */
	if (uobj->uo_refs) {				/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages that aren't PG_BUSY, mark for release any that are.
	 */

	busybody = FALSE;
	for (pg = uobj->memq.tqh_first ; pg != NULL ; pg = pg->listq.tqe_next) {
		int swslot;

		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_RELEASED;
			busybody = TRUE;
			continue;
		}

		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);

		swslot = uao_set_swslot(&aobj->u_obj,
		    pg->offset >> PAGE_SHIFT, 0);
		if (swslot) {
			uvm_swap_free(swslot, 1);
		}

		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * if we found any busy pages, we're done for now.
	 * mark the aobj for death, releasepg will finish up for us.
	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}

/*
 * uao_flush: uh, yea, sure it's flushed.  really!
 */
boolean_t
uao_flush(uobj, start, end, flags)
	struct uvm_object *uobj;
	vaddr_t start, end;
	int flags;
{

	/*
	 * anonymous memory doesn't "flush"
	 */
	/*
	 * XXX
	 * deal with PGO_DEACTIVATE (for madvise(MADV_SEQUENTIAL))
	 * and PGO_FREE (for msync(MS_INVALIDATE))
	 */
	return TRUE;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return VM_PAGER_UNLOCK.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	vaddr_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	vaddr_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d", aobj, offset, flags,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page, then
			 * zero-fill it.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					UVM_PAGE_OWN(ptmp, NULL);
					uvm_pagezero(ptmp);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return(VM_PAGER_OK);
		else
			/* EEK!   Need to unlock and I/O */
			return(VM_PAGER_UNLOCK);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {
		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL);	/* alloc */

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0,
				    "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.   nothing more to do except go to the next page.
		 */
		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */
		swslot = uao_find_swslot(aobj, current_offset >> PAGE_SHIFT);

		/*
		 * just zero the page if there's nothing in swap.
		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			    swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != VM_PAGER_OK) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    rv,0,0,0);
				if (ptmp->flags & PG_WANTED)
					/* object lock still held */
					thread_wakeup(ptmp);
				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();
				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
		pmap_clear_modify(PMAP_PGARG(ptmp));	/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}
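
/*
 * caller's view (a hedged sketch, not code from this file): per the
 * three-case comment above uao_get(), a fault handler would first try
 * the locked fast path and retry unlocked if case 3 is hit:
 *
 *	rv = uobj->pgops->pgo_get(uobj, offset, pps, &npages,
 *	    centeridx, access_type, advice, PGO_LOCKED);
 *	if (rv == VM_PAGER_UNLOCK) {
 *		... unlock the fault data structures, then call again
 *		without PGO_LOCKED so uao_get() may sleep and do I/O ...
 *	}
 */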

/*
 * uao_releasepg: handle released page in an aobj
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *      to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *      killed the page's object.    if we return TRUE, then we
 *      return with the object locked.
 * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return
 *                              with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the aobj if it is not referenced and we are supposed to
 *      kill it ("KILLME").
 */
static boolean_t
uao_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;
	int slot;

#ifdef DIAGNOSTIC
	if ((pg->flags & PG_RELEASED) == 0)
		panic("uao_releasepg: page not released!");
#endif

	/*
	 * dispose of the page [caller handles PG_WANTED] and swap slot.
	 */
	pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
	slot = uao_set_swslot(&aobj->u_obj, pg->offset >> PAGE_SHIFT, 0);
	if (slot)
		uvm_swap_free(slot, 1);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = pg->pageq.tqe_next;	/* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();	/* otherwise keep locked for daemon */

	/*
	 * if we're not killing the object, we're done.
	 */
	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.uo_refs)
		panic("uao_releasepg: kill flag set on referenced object!");
#endif

	/*
	 * if there are still pages in the object, we're done for now.
	 */
	if (aobj->u_obj.uo_npages != 0)
		return TRUE;

#ifdef DIAGNOSTIC
	if (aobj->u_obj.memq.tqh_first)
		panic("uao_releasepg: pages in object with npages == 0");
#endif

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);

	return FALSE;
}