/*	$NetBSD: uvm_aobj.c,v 1.40 2001/03/10 22:46:47 chs Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */


#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.   in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[(PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */
#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])
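
/*
 * a worked example of the macros above (illustrative; the mask value is
 * hypothetical): with UAO_SWHASH_CLUSTER_SHIFT == 4, page index 0x123
 * has tag 0x123 >> 4 == 0x12 and occupies slot 0x123 & 0xf == 3 of its
 * cluster's elt.  if u_swhashmask were 0x3 (four buckets), that elt
 * would be found on bucket 0x12 & 0x3 == 2:
 *
 *	swhash = UAO_SWHASH_HASH(aobj, 0x123);        == &u_swhash[2]
 *	slot = UAO_SWHASH_ELT_PAGESLOT(elt, 0x123);   == elt->slots[3]
 */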

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */
#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(min((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))
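
/*
 * for example (assuming 4KB pages; numbers are illustrative only): the
 * threshold is 16 * 4 == 64 pages, so a 64-page (256KB) aobj stores its
 * slots in a 64-entry array while a 65-page aobj switches to the hash
 * table.  a 4096-page (16MB) aobj asks hashinit() for
 * min(4096 >> 4, 256) == 256 buckets.
 */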

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 */

struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	int u_pages;		 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */

struct pool uvm_aobj_pool;

/*
 * local functions
 */

static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
							  int, boolean_t));
static int			 uao_find_swslot __P((struct uvm_aobj *, int));
static boolean_t		 uao_flush __P((struct uvm_object *,
						voff_t, voff_t, int));
static void			 uao_free __P((struct uvm_aobj *));
static int			 uao_get __P((struct uvm_object *, voff_t,
					      vm_page_t *, int *, int,
					      vm_prot_t, int, int));
static boolean_t		 uao_releasepg __P((struct vm_page *,
						    struct vm_page **));
static boolean_t		 uao_pagein __P((struct uvm_aobj *, int, int));
static boolean_t		 uao_pagein_page __P((struct uvm_aobj *, int));

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_flush,		/* flush */
	uao_get,		/* get */
	NULL,			/* put (done by pagedaemon) */
	NULL,			/* cluster */
	NULL,			/* mk_pcluster */
	uao_releasepg		/* releasepg */
};
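
/*
 * callers normally reach these functions through the ops vector rather
 * than by name; a minimal sketch (hypothetical caller, locking and
 * error handling elided) of fetching one page from an aobj:
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *
 *	error = uobj->pgops->pgo_get(uobj, off, &pg, &npages, 0,
 *	    VM_PROT_READ, 0, 0);          may sleep; unlocks uobj
 *
 * uao_pagein_page() below does essentially this by calling uao_get()
 * directly.
 */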

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static simple_lock_data_t uao_list_lock;


/*
 * functions
 */

/*
 * hash table/array related functions
 */

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(aobj, pageidx, create)
	struct uvm_aobj *aobj;
	int pageidx;
	boolean_t create;
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx); /* first hash to get bucket */
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);	/* tag to search for */

	/*
	 * now search the bucket for the requested tag
	 */
	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag)
			return(elt);
	}

	/* fail now if we are not allowed to create a new entry in the bucket */
	if (!create)
		return NULL;

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */
	elt = pool_get(&uao_swhash_elt_pool, PR_WAITOK);
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));

	return(elt);
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */
__inline static int
uao_find_swslot(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, FALSE);

		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */
	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 */
int
uao_set_swslot(uobj, pageidx, slot)
	struct uvm_object *uobj;
	int pageidx, slot;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {

		if (slot == 0)
			return(0);		/* a clear is ok */

		/* but a set is not */
		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: attempt to set a slot on a NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		struct uao_swhash_elt *elt =
		    uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
		if (elt == NULL) {
			KASSERT(slot == 0);
			return (0);
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		/* an allocation? */
		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {		/* freeing slot ... */
			if (oldslot)	/* to be safe */
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
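
/*
 * the old slot is returned so the caller can release swap it no longer
 * needs; a minimal sketch of the usual "drop this page's swap" idiom
 * (this is exactly what uao_dropswap() below does):
 *
 *	slot = uao_set_swslot(uobj, pageidx, 0);
 *	if (slot)
 *		uvm_swap_free(slot, 1);
 */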

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */
static void
uao_free(aobj)
	struct uvm_aobj *aobj;
{

	simple_unlock(&aobj->u_obj.vmobjlock);

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;

		/*
		 * free the swslots from each hash bucket,
		 * then the hash bucket, and finally the hash table itself.
		 */
		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int j;

				for (j = 0; j < UAO_SWHASH_CLUSTER_SIZE; j++) {
					int slot = elt->slots[j];

					if (slot == 0) {
						continue;
					}
					uvm_swap_free(slot, 1);

					/*
					 * this page is no longer
					 * only in swap.
					 */
					simple_lock(&uvm.swap_data_lock);
					uvmexp.swpgonly--;
					simple_unlock(&uvm.swap_data_lock);
				}

				next = LIST_NEXT(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
		free(aobj->u_swhash, M_UVMAOBJ);
	} else {
		int i;

		/*
		 * free the array
		 */

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			if (slot) {
				uvm_swap_free(slot, 1);

				/* this page is no longer only in swap. */
				simple_lock(&uvm.swap_data_lock);
				uvmexp.swpgonly--;
				simple_unlock(&uvm.swap_data_lock);
			}
		}
		free(aobj->u_swslots, M_UVMAOBJ);
	}

	/*
	 * finally free the aobj itself
	 */
	pool_put(&uvm_aobj_pool, aobj);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */
struct uvm_object *
uao_create(size, flags)
	vsize_t size;
	int flags;
{
	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
	static int kobj_alloced = 0;			/* not allocated yet */
	int pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */
	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
		if (kobj_alloced)
			panic("uao_create: kernel object already allocated");

		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;	/* no swap to start */
		/* we are special, we never die */
		aobj->u_obj.uo_refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		aobj = &kernel_object_store;
		if (kobj_alloced != UAO_FLAG_KERNOBJ)
			panic("uao_create: asked to enable swap on kernel object");
		kobj_alloced = UAO_FLAG_KERNSWAP;
	} else {	/* normal object */
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;		/* normal object */
		aobj->u_obj.uo_refs = 1;	/* start with 1 reference */
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about locking,
	 * since we are still booting and should be the only thread around.
	 */
	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = malloc(pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
			/* done! */
		}
	}

	/*
	 * init aobj fields
	 */
	simple_lock_init(&aobj->u_obj.vmobjlock);
	aobj->u_obj.pgops = &aobj_pager;
	TAILQ_INIT(&aobj->u_obj.memq);
	aobj->u_obj.uo_npages = 0;

	/*
	 * now that aobj is ready, add it to the global list
	 */
	simple_lock(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * done!
	 */
	return(&aobj->u_obj);
}
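
/*
 * a sketch of typical use (illustrative only; assumes a uvm_map()
 * prototype of (map, startp, size, uobj, uoffset, flags) and elides
 * error handling): create an aobj and map it in to get object-backed,
 * pageable anonymous memory:
 *
 *	struct uvm_object *uobj = uao_create(size, 0);
 *	vaddr_t va = 0;
 *
 *	if (uvm_map(map, &va, round_page(size), uobj, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, 0)) != 0)
 *		uao_detach(uobj);         mapping failed, drop our ref
 *
 * on success the map entry owns the reference uao_create() returned;
 * unmapping the range later drops it.
 */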

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */
void
uao_init()
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = TRUE;

	LIST_INIT(&uao_list);
	simple_lock_init(&uao_list_lock);

	/*
	 * NOTE: Pages for this pool must not come from a pageable
	 * kernel map!
	 */
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", 0, NULL, NULL, M_UVMAOBJ);

	pool_init(&uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0,
	    "aobjpl", 0,
	    pool_page_alloc_nointr, pool_page_free_nointr, M_UVMAOBJ);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_reference(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */
void
uao_reference_locked(uobj)
	struct uvm_object *uobj;
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;		/* bump! */
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
		    uobj, uobj->uo_refs,0,0);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */
void
uao_detach(uobj)
	struct uvm_object *uobj;
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}


/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */
void
uao_detach_locked(uobj)
	struct uvm_object *uobj;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg, *nextpg;
	boolean_t busybody;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;				/* drop ref! */
	if (uobj->uo_refs) {				/* still more refs? */
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */
	simple_lock(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	simple_unlock(&uao_list_lock);

	/*
	 * free all the pages that aren't PG_BUSY,
	 * mark for release any that are.
	 */
	busybody = FALSE;
	for (pg = TAILQ_FIRST(&uobj->memq);
	     pg != NULL;
	     pg = nextpg) {
		/* grab the next page now, in case we free this one */
		nextpg = TAILQ_NEXT(pg, listq);
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_RELEASED;
			busybody = TRUE;
			continue;
		}

		/* zap the mappings, free the swap slot, free the page */
		pmap_page_protect(pg, VM_PROT_NONE);
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}

	/*
	 * if we found any busy pages, we're done for now.
	 * mark the aobj for death, releasepg will finish up for us.
	 */
	if (busybody) {
		aobj->u_flags |= UAO_FLAG_KILLME;
		simple_unlock(&aobj->u_obj.vmobjlock);
		return;
	}

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);
}

/*
 * uao_flush: "flush" pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return TRUE unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * comment on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.  the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.  if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).  also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busied.
 *	XXXJRT We never actually do this, as we are "flushing" anonymous
 *	XXXJRT memory, which doesn't have persistent backing store.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	page by page, doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup were equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */

#define	UAO_HASH_PENALTY 4	/* XXX: a guess */
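
/*
 * e.g. (illustrative numbers): flushing a 16-page range of an object
 * with 100 resident pages gives 100 <= 16 * 4 == false, so we go page
 * by page with uvm_pagelookup(); with only 40 resident pages,
 * 40 <= 64 holds and we walk the memq list instead.
 */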

boolean_t
uao_flush(uobj, start, stop, flags)
	struct uvm_object *uobj;
	voff_t start, stop;
	int flags;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) uobj;
	struct vm_page *pp, *ppnext;
	boolean_t retval, by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_flush"); UVMHIST_CALLED(maphist);

	curoff = 0;	/* XXX: shut up gcc */

	retval = TRUE;	/* default to success */

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = TRUE;		/* always go by the list */
	} else {
		start = trunc_page(start);
		stop = round_page(stop);
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UAO_HASH_PENALTY);
	}

	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */
	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		UVMHIST_LOG(maphist,
		    "<- done (no work to do)",0,0,0,0);
		return (retval);
	}

	/*
	 * now do it.  note: we must update ppnext in the body of loop or we
	 * will get stuck.  we need to use ppnext because we may free "pp"
	 * before doing the next loop.
	 */

	if (by_list) {
		pp = TAILQ_FIRST(&uobj->memq);
	} else {
		curoff = start;
		pp = uvm_pagelookup(uobj, curoff);
	}

	ppnext = NULL;	/* XXX: shut up gcc */
	uvm_lock_pageq();	/* page queues locked */

	/* locked: both page queues and uobj */
	for ( ; (by_list && pp != NULL) ||
	    (!by_list && curoff < stop) ; pp = ppnext) {
		if (by_list) {
			ppnext = TAILQ_NEXT(pp, listq);

			/* range check */
			if (pp->offset < start || pp->offset >= stop)
				continue;
		} else {
			curoff += PAGE_SIZE;
			if (curoff < stop)
				ppnext = uvm_pagelookup(uobj, curoff);

			/* null check */
			if (pp == NULL)
				continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */
		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			/* skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/* ...and deactivate the page. */
			pmap_clear_reference(pp);
			uvm_pagedeactivate(pp);

			continue;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */
			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/* XXX skip the page if it's loaned or wired */
			if (pp->loan_count != 0 ||
			    pp->wire_count != 0)
				continue;

			/*
			 * mark the page as released if it's busy.
			 */
			if (pp->flags & PG_BUSY) {
				pp->flags |= PG_RELEASED;
				continue;
			}

			/* zap all mappings for the page. */
			pmap_page_protect(pp, VM_PROT_NONE);

			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
			uvm_pagefree(pp);

			continue;

		default:
			panic("uao_flush: weird flags");
		}
	}

	uvm_unlock_pageq();

	UVMHIST_LOG(maphist,
	    "<- done, rv=%d",retval,0,0,0);
	return (retval);
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
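/*
 * a sketch of the two-pass protocol a caller (e.g. the fault code)
 * follows (illustrative only; locking and error paths simplified):
 *
 *	npages = 1; pg = NULL;
 *	error = uao_get(uobj, off, &pg, &npages, 0, prot, 0, PGO_LOCKED);
 *	if (error == EBUSY) {
 *		...unlock the fault data structures, then retry without
 *		PGO_LOCKED so that uao_get() may sleep and do I/O:
 *		error = uao_get(uobj, off, &pg, &npages, 0, prot, 0, 0);
 *	}
 *
 * on success the returned page is PG_BUSY and owned by the caller, who
 * must check PG_RELEASED, un-busy and activate it (see the NOTEs above).
 */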
static int
uao_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	voff_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	voff_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages, swslot, rv, pageidx;
	boolean_t done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
		    aobj, offset, flags,0);

	/*
	 * get number of pages
	 */
	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {
		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */
			if (ptmp == NULL && uao_find_swslot(aobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					UVM_PAGE_OWN(ptmp, NULL);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0) {
					/* need to do a wait or I/O! */
					done = FALSE;
				}
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			/* bingo! */
			return(0);
		else
			/* EEK!   Need to unlock and I/O */
			return(EBUSY);
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */
				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    FALSE, "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.   nothing more to do except go to the next page.
		 */
		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */
		swslot = uao_find_swslot(aobj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */
		if (swslot == 0) {
			/*
			 * page hasn't existed before, just zero it.
			 */
			uvm_pagezero(ptmp);
		} else {
			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			     swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */
			simple_unlock(&uobj->vmobjlock);
			rv = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */
			if (rv != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    rv,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */
				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
							SWSLOT_BAD);
				uvm_swap_markbad(swslot, 1);

				ptmp->flags &= ~(PG_WANTED|PG_BUSY);
				UVM_PAGE_OWN(ptmp, NULL);
				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();

				simple_unlock(&uobj->vmobjlock);
				return (rv);
			}
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;		/* data is valid ... */
		pmap_clear_modify(ptmp);		/* ... and clean */
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return(0);
}

/*
 * uao_releasepg: handle released page in an aobj
 *
 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
 *      to dispose of.
 * => caller must handle PG_WANTED case
 * => called with page's object locked, pageq's unlocked
 * => returns TRUE if page's object is still alive, FALSE if we
 *      killed the page's object.    if we return TRUE, then we
 *      return with the object locked.
 * => if (nextpgp != NULL) => we return the next page on the queue, and return
 *                              with the page queues locked [for pagedaemon]
 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
 * => we kill the aobj if it is not referenced and we are supposed to
 *      kill it ("KILLME").
 */
static boolean_t
uao_releasepg(pg, nextpgp)
	struct vm_page *pg;
	struct vm_page **nextpgp;	/* OUT */
{
	struct uvm_aobj *aobj = (struct uvm_aobj *) pg->uobject;

	KASSERT(pg->flags & PG_RELEASED);

	/*
	 * dispose of the page [caller handles PG_WANTED] and swap slot.
	 */
	pmap_page_protect(pg, VM_PROT_NONE);
	uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
	uvm_lock_pageq();
	if (nextpgp)
		*nextpgp = TAILQ_NEXT(pg, pageq); /* next page for daemon */
	uvm_pagefree(pg);
	if (!nextpgp)
		uvm_unlock_pageq();	/* otherwise keep locked for daemon */

	/*
	 * if we're not killing the object, we're done.
	 */
	if ((aobj->u_flags & UAO_FLAG_KILLME) == 0)
		return TRUE;
	KASSERT(aobj->u_obj.uo_refs == 0);

	/*
	 * if there are still pages in the object, we're done for now.
	 */
	if (aobj->u_obj.uo_npages != 0)
		return TRUE;

	KASSERT(TAILQ_EMPTY(&aobj->u_obj.memq));

	/*
	 * finally, free the rest.
	 */
	uao_free(aobj);

	return FALSE;
}


/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(uobj, pageidx)
	struct uvm_object *uobj;
	int pageidx;
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * uao_swap_off: page in every page in every aobj that is paged-out to
 * a range of swslots.
 *
 * => nothing should be locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
boolean_t
uao_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_aobj *aobj, *nextaobj;

	/*
	 * walk the list of all aobjs.
	 */

restart:
	simple_lock(&uao_list_lock);

	for (aobj = LIST_FIRST(&uao_list);
	     aobj != NULL;
	     aobj = nextaobj) {
		boolean_t rv;

		/*
		 * try to get the object lock,
		 * start all over if we fail.
		 * most of the time we'll get the aobj lock,
		 * so this should be a rare case.
		 */
		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
			simple_unlock(&uao_list_lock);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */
		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */
		simple_unlock(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */
		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */
		simple_lock(&uao_list_lock);
		nextaobj = LIST_NEXT(aobj, u_list);
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	simple_unlock(&uao_list_lock);
	return FALSE;
}

/*
 * uao_pagein: page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */
static boolean_t
uao_pagein(aobj, startslot, endslot)
	struct uvm_aobj *aobj;
	int startslot, endslot;
{
	boolean_t rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int bucket;

restart:
		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[bucket]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */
					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */
					rv = uao_pagein_page(aobj,
					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */
			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */
			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return FALSE;
}

/*
 * uao_pagein_page: page in a page from an aobj.  used for swap_off.
 * returns TRUE if pagein was aborted due to lack of memory.
 *
 * => aobj must be locked and is returned locked.
 */
static boolean_t
uao_pagein_page(aobj, pageidx)
	struct uvm_aobj *aobj;
	int pageidx;
{
	struct vm_page *pg;
	int rv, slot, npages;

	pg = NULL;
	npages = 1;
	/* locked: aobj */
	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
		     &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
	/* unlocked: aobj */

	/*
	 * relock and finish up.
	 */
	simple_lock(&aobj->u_obj.vmobjlock);

	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:
		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */
		return FALSE;
	}
	KASSERT((pg->flags & PG_RELEASED) == 0);

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
	uvm_swap_free(slot, 1);
	pg->flags &= ~(PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	/*
	 * deactivate the page (to put it on a page queue).
	 */
	pmap_clear_reference(pg);
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	return FALSE;
}