/*	$NetBSD: uvm_aobj.c,v 1.45 2001/06/23 20:52:03 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq (at) chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */



#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.   in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])
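
/*
 * worked example: with UAO_SWHASH_CLUSTER_SHIFT == 4, page index 0x2a
 * has tag 0x2 (0x2a >> 4), occupies slots[0xa] (0x2a & 0xf) of its
 * elt, and hashes to bucket (0x2 & u_swhashmask).
 */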

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))
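
/*
 * e.g. with a 16-page cluster the threshold is 64 pages (256KB,
 * assuming 4KB pages): larger aobjs get a hash table with
 * min(u_pages / 16, 256) buckets, smaller ones a flat slot array.
 */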


/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};
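
/*
 * note that each elt covers one cluster of 16 consecutive pages (a
 * 64KB span of the object, assuming 4KB pages), so one list node
 * tracks up to 16 swap slots.
 */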

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

/*
 * local functions
 */

static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
							  int, boolean_t));
static int			 uao_find_swslot __P((struct uvm_aobj *, int));
static boolean_t		 uao_flush __P((struct uvm_object *,
						voff_t, voff_t, int));
static void			 uao_free __P((struct uvm_aobj *));
static int			 uao_get __P((struct uvm_object *, voff_t,
					      struct vm_page **, int *, int,
					      vm_prot_t, int, int));
static boolean_t		 uao_releasepg __P((struct vm_page *,
						    struct vm_page **));
static boolean_t		 uao_pagein __P((struct uvm_aobj *, int, int));
static boolean_t		 uao_pagein_page __P((struct uvm_aobj *, int));

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uao_releasepg		/* releasepg */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static struct simplelock uao_list_lock;


/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */
	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {

		if (slot == 0)
			return(0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot"
		    " on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
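
/*
 * a minimal caller sketch (illustrative; assumes uvm_swap_alloc() and
 * uvm_swap_free() from uvm_swap.c): record a freshly allocated swap
 * slot for a page, giving the slot back if bookkeeping fails.
 *
 *	int nslots = 1;
 *	int slot = uvm_swap_alloc(&nslots, TRUE);
 *
 *	if (slot != 0 && uao_set_swslot(uobj, pageidx, slot) == -1) {
 *		uvm_swap_free(slot, 1);	 (couldn't record the mapping)
 *	}
 */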

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	simple_unlock(&aobj->u_obj.vmobjlock);

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot == 0) {
						continue;
					}
					uvm_swap_free(slot, 1);

					/*
					 * this page is no longer
					 * only in swap.
					 */
					simple_lock(&uvm.swap_data_lock);
					uvmexp.swpgonly--;
					simple_unlock(&uvm.swap_data_lock);
				}

				next = LIST_NEXT(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		free(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot) {
				uvm_swap_free(slot, 1);

				/* this page is no longer only in swap. */
				simple_lock(&uvm.swap_data_lock);
				uvmexp.swpgonly--;
				simple_unlock(&uvm.swap_data_lock);
			}
		}
		free(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	pool_put(&uvm_aobj_pool, aobj);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */
struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
	static int kobj_alloced = 0;			/* not allocated yet */
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
		    panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about
	 * locking since we are still booting and should be the only
	 * thread around.
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = malloc(pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
			/* done! */
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return(&aobj->u_obj);
}
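
/*
 * usage sketch (illustrative): a typical non-kernel aobj lifecycle
 * using only the routines defined in this file.
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(64 * PAGE_SIZE, 0);	(64-page aobj, 1 ref)
 *	uao_reference(uobj);			(e.g. a second mapping)
 *	...
 *	uao_detach(uobj);			(drop one reference)
 *	uao_detach(uobj);			(last ref: pages/swap freed)
 */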



/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
void
uao_init()
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", 0, NULL, NULL, M_UVMAOBJ);

	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */
void
uao_reference_locked(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
		    uobj, uobj->uo_refs,0,0);
}


/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}


/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */
void
uao_detach_locked(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg, *nextpg;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;				/* drop ref! */
	if (uobj->uo_refs) {				/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages that aren't PG_BUSY,
	 * mark for release any that are.
	 */
	busybody = FALSE;
	for (pg = TAILQ_FIRST(&uobj->memq); pg != NULL; pg = nextpg) {
		nextpg = TAILQ_NEXT(pg, listq);
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_RELEASED;
			busybody = TRUE;
			continue;
		}

		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(pg, VM_PROT_NONE);
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * if we found any busy pages, we're done for now.
	 * mark the aobj for death, releasepg will finish up for us.
	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}

/*
 * uao_flush: "flush" pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return TRUE unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * comment on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.  the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.  if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).  also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busied.
 *	XXXJRT We never actually do this, as we are "flushing" anonymous
 *	XXXJRT memory, which doesn't have persistent backing store.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */

#define	UAO_HASH_PENALTY 4	/* XXX: a guess */
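
/*
 * e.g. flushing a 16-page range of an object with 100 resident pages:
 * 16 * 4 = 64 < 100, so we walk the range doing per-offset lookups;
 * with only 10 resident pages we would walk memq instead.
 */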

boolean_t
uao_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	voff_t start, stop;
	int flags;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
	struct vm_page *pp, *ppnext;
	boolean_t retval, by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_flush"); UVMHIST_CALLED(maphist);

	curoff = 0;	/* XXX: shut up gcc */

	retval = TRUE;	/* default to success */

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UAO_HASH_PENALTY);
	}

	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */
	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		UVMHIST_LOG(maphist,
		    "<- done (no work to do)",0,0,0,0);
		return (retval);
	}

	/*
	 * now do it.  note: we must update ppnext in the body of the loop
	 * or we will get stuck.  we need to use ppnext because we may free
	 * "pp" before doing the next loop.
	 */

	if (by_list) {
		pp = uobj->memq.tqh_first;
	} else {
		curoff = start;
		pp = uvm_pagelookup(uobj, curoff);
	}

	ppnext = NULL;	/* XXX: shut up gcc */
	uvm_lock_pageq();	/* page queues locked */

	/* locked: both page queues and uobj */
	for ( ; (by_list && pp != NULL) ||
	    (!by_list && curoff < stop) ; pp = ppnext) {
		if (by_list) {
			ppnext = TAILQ_NEXT(pp, listq);

			/* range check */
			if (pp->offset < start || pp->offset >= stop)
				continue;
		} else {
			curoff += PAGE_SIZE;
			if (curoff < stop)
				ppnext = uvm_pagelookup(uobj, curoff);

			/* null check */
			if (pp == NULL)
				continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */
		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			/* skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/* ...and deactivate the page. */
			pmap_clear_reference(pp);
			uvm_pagedeactivate(pp);

			continue;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */
			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/* XXX skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/*
			 * mark the page as released if it's busy.
			 */
			if (pp->flags & PG_BUSY) {
				pp->flags |= PG_RELEASED;
				continue;
			}

			/* zap all mappings for the page. */
			pmap_page_protect(pp, VM_PROT_NONE);

			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
			uvm_pagefree(pp);

			continue;

		default:
			panic("uao_flush: weird flags");
		}
	}

	uvm_unlock_pageq();

	UVMHIST_LOG(maphist,
	    "<- done, rv=%d",retval,0,0,0);
	return (retval);
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	voff_t current_offset;
	struct vm_page *ptmp;
	int lcv, gotpages, maxpages, swslot, rv, pageidx;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
		    aobj, offset, flags,0);

	/*
	 * get number of pages
	 */
	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {
		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					UVM_PAGE_OWN(ptmp, NULL);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0) {
					/* need to do a wait or I/O! */
					done = FALSE;
				}
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return(0);
		else
			/* EEK!   Need to unlock and I/O */
			return(EBUSY);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    FALSE, "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv]
		 * will point to it.   nothing more to do except go to the
		 * next page.
		 */
		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */
		swslot = uao_find_swslot(aobj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			     swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    rv,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */
				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
							SWSLOT_BAD);
				if (swslot != -1) {
					uvm_swap_markbad(swslot, 1);
				}

				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();

				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
		pmap_clear_modify(ptmp);		/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return(0);
}
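
/*
 * usage sketch (illustrative; uao_pagein_page() below drives this path
 * the same way): fetch one page, sleeping for memory or I/O as needed.
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1, error;
 *
 *	simple_lock(&uobj->vmobjlock);
 *	error = uao_get(uobj, pageidx << PAGE_SHIFT, &pg, &npages, 0,
 *	    VM_PROT_READ, 0, 0);	(unlocks uobj before any I/O)
 *	if (error == 0) {
 *		(pg now holds valid data and is still PG_BUSY; the
 *		 caller must check PG_RELEASED and un-busy it.)
 *	}
 */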

/*
 * uao_releasepg: handle released page in an aobj
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *      to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *      killed the page's object.    if we return TRUE, then we
 *      return with the object locked.
 * => if (nextpgp != NULL) => we return the next page on the queue, and return
 *                              with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the aobj if it is not referenced and we are supposed to
 *      kill it ("KILLME").
 */
static boolean_t
uao_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;

	KASSERT(pg->flags & PG_RELEASED);

	/*
	 * dispose of the page [caller handles PG_WANTED] and swap slot.
	 */
	pmap_page_protect(pg, VM_PROT_NONE);
	uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = TAILQ_NEXT(pg, pageq); /* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();		/* keep locked for daemon */

	/*
	 * if we're not killing the object, we're done.
	 */
	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
		return TRUE;
	KASSERT(aobj->u_obj.uo_refs == 0);

	/*
	 * if there are still pages in the object, we're done for now.
	 */
	if (aobj->u_obj.uo_npages != 0)
		return TRUE;

	KASSERT(TAILQ_EMPTY(&aobj->u_obj.memq));

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);

	return FALSE;
}


/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(uobj, pageidx)
	struct uvm_object *uobj;
	int pageidx;
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}


/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
boolean_t
uao_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_aobj *aobj, *nextaobj;

	/*
	 * walk the list of all aobjs.
	 */

restart:
	simple_lock(&uao_list_lock);

	for (aobj = LIST_FIRST(&uao_list);
	     aobj != NULL;
	     aobj = nextaobj) {
		boolean_t rv;

		/*
		 * try to get the object lock,
		 * start all over if we fail.
		 * most of the time we'll get the aobj lock,
		 * so this should be a rare case.
		 */
		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
			simple_unlock(&uao_list_lock);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */
		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */
		simple_unlock(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */
		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */
		simple_lock(&uao_list_lock);
		nextaobj = LIST_NEXT(aobj, u_list);
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	simple_unlock(&uao_list_lock);
	return FALSE;
}


/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
static boolean_t
uao_pagein(aobj, startslot, endslot)
	struct uvm_aobj *aobj;
	int startslot, endslot;
{
	boolean_t rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int bucket;

restart:
		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */
					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */
					rv = uao_pagein_page(aobj,
					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */
			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */
			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return FALSE;
}

/*
 * page in a page from an aobj.  used for swap_off.
 * returns TRUE if pagein was aborted due to lack of memory.
 *
 * => aobj must be locked and is returned locked.
 */
static boolean_t
uao_pagein_page(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{
	struct vm_page *pg;
	int rv, slot, npages;

	pg = NULL;
	npages = 1;
	/* locked: aobj */
	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
		     &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
	/* unlocked: aobj */

	/*
	 * relock and finish up.
	 */
	simple_lock(&aobj->u_obj.vmobjlock);

	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:
		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */
		return FALSE;

	}
	KASSERT((pg->flags & PG_RELEASED) == 0);

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
	uvm_swap_free(slot, 1);
	pg->flags &= ~(PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	/*
	 * deactivate the page (to put it on a page queue).
	 */
	pmap_clear_reference(pg);
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	return FALSE;
}