/*	$NetBSD: uvm_aobj.c,v 1.110.4.1 2011/02/08 16:20:06 bouyer Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq (at) chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.110.4.1 2011/02/08 16:20:06 bouyer Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.   in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

#define UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX) \
	((PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1))

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */

#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])
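
/*
 * Worked example (illustrative only, not compiled): with the default
 * UAO_SWHASH_CLUSTER_SHIFT of 4 a cluster covers 16 pages, so for
 * page index 0x123:
 *
 *	UAO_SWHASH_ELT_TAG(0x123)          == 0x123 >> 4       == 0x12
 *	UAO_SWHASH_ELT_PAGESLOT_IDX(0x123) == 0x123 & (16 - 1) == 0x3
 *
 * i.e. the slot for page 0x123 lives at slots[3] of the element whose
 * tag is 0x12, on the bucket list u_swhash[0x12 & u_swhashmask].
 */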

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */

#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(MIN((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))
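
/*
 * Example (illustrative arithmetic): with UAO_SWHASH_CLUSTER_SIZE == 16
 * the threshold is 64 pages, so aobjs of 65 or more pages use the hash.
 * a 1024-page aobj asks hashinit() for MIN(1024 >> 4, 256) == 64
 * buckets; only aobjs of 4096 pages or more hit the
 * UAO_SWHASH_MAXBUCKETS cap.
 */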

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 * NOTE: Pages for this pool must not come from a pageable kernel map!
 */
static struct pool uao_swhash_elt_pool;

static struct pool_cache uvm_aobj_cache;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	pgoff_t u_pages;	 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};
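
/*
 * Illustrative note: because u_obj is the first member, the pager
 * routines below convert between the two views with a plain cast,
 * e.g.:
 *
 *	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
 */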

/*
 * local functions
 */

static void	uao_free(struct uvm_aobj *);
static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
		    int *, int, vm_prot_t, int, int);
static int	uao_put(struct uvm_object *, voff_t, voff_t, int);

static void uao_detach_locked(struct uvm_object *);
static void uao_reference_locked(struct uvm_object *);

#if defined(VMSWAP)
static struct uao_swhash_elt *uao_find_swhash_elt
    (struct uvm_aobj *, int, bool);

static bool uao_pagein(struct uvm_aobj *, int, int);
static bool uao_pagein_page(struct uvm_aobj *, int);
static void uao_dropswap_range1(struct uvm_aobj *, voff_t, voff_t);
#endif /* defined(VMSWAP) */

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

const struct uvm_pagerops aobj_pager = {
	.pgo_reference = uao_reference,
	.pgo_detach = uao_detach,
	.pgo_get = uao_get,
	.pgo_put = uao_put,
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static kmutex_t uao_list_lock;

/*
 * functions
 */

/*
 * hash table/array related functions
 */

#if defined(VMSWAP)

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}
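
/*
 * Caller sketch (illustrative): the element pool is drawn from with
 * PR_NOWAIT, so a create request can still fail and callers must be
 * ready for NULL.  uao_set_swslot() below reports that as -1:
 *
 *	elt = uao_find_swhash_elt(aobj, pageidx, true);
 *	if (elt == NULL)
 *		return slot ? -1 : 0;
 */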

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(struct uvm_object *uobj, int pageidx)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		elt = uao_find_swhash_elt(aobj, pageidx, false);
		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */

	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */

int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	KASSERT(mutex_owned(&uobj->vmobjlock) || uobj->uo_refs == 0);

	/*
	 * if noswap flag is set, then we can't set a non-zero slot.
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		if (slot == 0)
			return(0);

		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
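
/*
 * Usage sketch (illustrative): releasing whatever swap backs a page is
 * just a matter of clearing its slot, which is exactly what
 * uao_dropswap() later in this file does:
 *
 *	slot = uao_set_swslot(uobj, pageidx, 0);
 *	if (slot)
 *		uvm_swap_free(slot, 1);
 */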

#endif /* defined(VMSWAP) */

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(struct uvm_aobj *aobj)
{

#if defined(VMSWAP)
	uao_dropswap_range1(aobj, 0, 0);
#endif /* defined(VMSWAP) */

	mutex_exit(&aobj->u_obj.vmobjlock);

#if defined(VMSWAP)
	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * free the hash table itself.
		 */

		hashdone(aobj->u_swhash, HASH_LIST, aobj->u_swhashmask);
	} else {

		/*
		 * free the array itself.
		 */

		kmem_free(aobj->u_swslots, aobj->u_pages * sizeof(int));
	}
#endif /* defined(VMSWAP) */

	/*
	 * finally free the aobj itself
	 */

	UVM_OBJ_DESTROY(&aobj->u_obj);
	pool_cache_put(&uvm_aobj_cache, aobj);
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */

struct uvm_object *
uao_create(vsize_t size, int flags)
{
	static struct uvm_aobj kernel_object_store;
	static int kobj_alloced = 0;
	pgoff_t pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;
	int refs;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */

	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
		refs = 0xdeadbeaf; /* XXX: gcc */
	} else {
		aobj = pool_cache_get(&uvm_aobj_cache, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		refs = 1;
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about
	 * locking, since we are still booting and should be the only
	 * thread around.
	 */

	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
#if defined(VMSWAP)
		const int kernswap = (flags & UAO_FLAG_KERNSWAP) != 0;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, kernswap ? false : true,
			    &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = kmem_zalloc(pages * sizeof(int),
			    kernswap ? KM_NOSLEEP : KM_SLEEP);
			if (aobj->u_swslots == NULL)
				panic("uao_create: kmem_zalloc swslots failed");
		}
#endif /* defined(VMSWAP) */

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
		}
	}

	/*
	 * init aobj fields
	 */

	UVM_OBJ_INIT(&aobj->u_obj, &aobj_pager, refs);

	/*
	 * now that aobj is ready, add it to the global list
	 */

	mutex_enter(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	mutex_exit(&uao_list_lock);
	return(&aobj->u_obj);
}
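
/*
 * Usage sketch (illustrative, normal non-kernel case): an aobj is
 * created with one reference and lives until its last reference is
 * dropped:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(16 * PAGE_SIZE, 0);	(one reference)
 *	uao_reference(uobj);			(e.g. a second mapping)
 *	...
 *	uao_detach(uobj);
 *	uao_detach(uobj);	(last ref: pages and swap are freed)
 */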

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = true;
	LIST_INIT(&uao_list);
	mutex_init(&uao_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_cache_bootstrap(&uvm_aobj_cache, sizeof(struct uvm_aobj), 0, 0,
	    0, "aobj", NULL, IPL_NONE, NULL, NULL, NULL);
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", NULL, IPL_VM);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_reference(struct uvm_object *uobj)
{

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	mutex_enter(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	mutex_exit(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */

static void
uao_reference_locked(struct uvm_object *uobj)
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
		    uobj, uobj->uo_refs,0,0);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_detach(struct uvm_object *uobj)
{

	/*
	 * detaching from kernel_object is a noop.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	mutex_enter(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}

/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */

static void
uao_detach_locked(struct uvm_object *uobj)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		mutex_exit(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;
	if (uobj->uo_refs) {
		mutex_exit(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */

	mutex_enter(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	mutex_exit(&uao_list_lock);

	/*
	 * free all the pages left in the aobj.  for each page,
	 * when the page is no longer busy (and thus after any disk i/o that
	 * it's involved in is complete), release any swap resources and
	 * free the page itself.
	 */

	mutex_enter(&uvm_pageqlock);
	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
		pmap_page_protect(pg, VM_PROT_NONE);
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			mutex_exit(&uvm_pageqlock);
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, false,
			    "uao_det", 0);
			mutex_enter(&uobj->vmobjlock);
			mutex_enter(&uvm_pageqlock);
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	mutex_exit(&uvm_pageqlock);

	/*
	 * finally, free the aobj itself.
	 */

	uao_free(aobj);
}

/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return 0 unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */

static int
uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg, *nextpg, curmp, endmp;
	bool by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);

	KASSERT(mutex_owned(&uobj->vmobjlock));

	curoff = 0;
	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = true;		/* always go by the list */
	} else {
		start = trunc_page(start);
		if (stop == 0) {
			stop = aobj->u_pages << PAGE_SHIFT;
		} else {
			stop = round_page(stop);
		}
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_put: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);
	}
	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */

	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}

	/*
	 * Initialize the marker pages.  See the comment in
	 * genfs_putpages() also.
	 */

	curmp.flags = PG_MARKER;
	endmp.flags = PG_MARKER;

	/*
	 * now do it.  note: we must update nextpg in the body of loop or we
	 * will get stuck.  we need to use nextpg if we'll traverse the list
	 * because we may free "pg" before doing the next loop.
	 */

	if (by_list) {
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
		nextpg = TAILQ_FIRST(&uobj->memq);
	} else {
		curoff = start;
		nextpg = NULL;	/* Quell compiler warning */
	}

	/* locked: uobj */
	for (;;) {
		if (by_list) {
			pg = nextpg;
			if (pg == &endmp)
				break;
			nextpg = TAILQ_NEXT(pg, listq.queue);
			if (pg->flags & PG_MARKER)
				continue;
			if (pg->offset < start || pg->offset >= stop)
				continue;
		} else {
			if (curoff < stop) {
				pg = uvm_pagelookup(uobj, curoff);
				curoff += PAGE_SIZE;
			} else
				break;
			if (pg == NULL)
				continue;
		}

		/*
		 * wait and try again if the page is busy.
		 */

		if (pg->flags & PG_BUSY) {
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
			}
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "uao_put", 0);
			mutex_enter(&uobj->vmobjlock);
			if (by_list) {
				nextpg = TAILQ_NEXT(&curmp, listq.queue);
				TAILQ_REMOVE(&uobj->memq, &curmp,
				    listq.queue);
			} else
				curoff -= PAGE_SIZE;
			continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */

		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			mutex_enter(&uvm_pageqlock);
			/* skip the page if it's wired */
			if (pg->wire_count == 0) {
				uvm_pagedeactivate(pg);
			}
			mutex_exit(&uvm_pageqlock);
			break;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */

			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/*
			 * free the swap slot and the page.
			 */

			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * freeing swapslot here is not strictly necessary.
			 * however, leaving it here doesn't save much
			 * because we need to update swap accounting anyway.
			 */

			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
			break;

		default:
			panic("%s: impossible", __func__);
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
	}
	mutex_exit(&uobj->vmobjlock);
	return 0;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
#if defined(VMSWAP)
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
#endif /* defined(VMSWAP) */
	voff_t current_offset;
	struct vm_page *ptmp = NULL;	/* Quell compiler warning */
	int lcv, gotpages, maxpages, swslot, pageidx;
	bool done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
		    (struct uvm_aobj *)uobj, offset, flags,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = true;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */
		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;
			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */

			if (ptmp == NULL && uao_find_swslot(&aobj->u_obj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					goto gotpage;
				}
			}

			/*
			 * to be useful must get a non-busy page
			 */

			if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = false;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
gotpage:
			pps[lcv] = ptmp;
			gotpages++;
		}

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);
		*npagesp = gotpages;
		if (done)
			return 0;
		else
			return EBUSY;
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	if ((flags & PGO_SYNCIO) == 0) {
		goto done;
	}

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					mutex_exit(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					mutex_enter(&uobj->vmobjlock);
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */

				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */

				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & PG_BUSY) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    false, "uao_get", 0);
				mutex_enter(&uobj->vmobjlock);
				continue;
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */

			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.   nothing more to do except go to the next page.
		 */

		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */

		swslot = uao_find_swslot(&aobj->u_obj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */

		if (swslot == 0) {

			/*
			 * page hasn't existed before, just zero it.
			 */

			uvm_pagezero(ptmp);
		} else {
#if defined(VMSWAP)
			int error;

			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			     swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */

			mutex_exit(&uobj->vmobjlock);
			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			mutex_enter(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */

			if (error != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    error,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
							SWSLOT_BAD);
				if (swslot > 0) {
					uvm_swap_markbad(swslot, 1);
				}

				mutex_enter(&uvm_pageqlock);
				uvm_pagefree(ptmp);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(&uobj->vmobjlock);
				return error;
			}
#else /* defined(VMSWAP) */
			panic("%s: pagein", __func__);
#endif /* defined(VMSWAP) */
		}

		if ((access_type & VM_PROT_WRITE) == 0) {
			ptmp->flags |= PG_CLEAN;
			pmap_clear_modify(ptmp);
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the callers job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;
		pps[lcv] = ptmp;
	}

	/*
	 * finally, unlock object and return.
	 */

done:
	mutex_exit(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return 0;
}

#if defined(VMSWAP)

/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(struct uvm_object *uobj, int pageidx)
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uao_swap_off(int startslot, int endslot)
{
	struct uvm_aobj *aobj, *nextaobj;
	bool rv;

	/*
	 * walk the list of all aobjs.
	 */

restart:
	mutex_enter(&uao_list_lock);
	for (aobj = LIST_FIRST(&uao_list);
	     aobj != NULL;
	     aobj = nextaobj) {

		/*
		 * try to get the object lock, start all over if we fail.
		 * most of the time we'll get the aobj lock,
		 * so this should be a rare case.
		 */

		if (!mutex_tryenter(&aobj->u_obj.vmobjlock)) {
			mutex_exit(&uao_list_lock);
			/* XXX Better than yielding but inadequate. */
			kpause("livelock", false, 1, NULL);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */

		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */

		mutex_exit(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */

		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */

		mutex_enter(&uao_list_lock);
		nextaobj = LIST_NEXT(aobj, u_list);
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	mutex_exit(&uao_list_lock);
	return false;
}
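
/*
 * Caller sketch (an illustrative assumption about the caller; see
 * uvm_swap.c): when a swap device is being removed, the swapoff path
 * calls something like
 *
 *	uao_swap_off(startslot, startslot + npages);
 *
 * to page every affected aobj page back in before the slots go away.
 */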

/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns true if pagein was aborted due to lack of memory.
 */
static bool
uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
{
	bool rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int buck;

restart:
		for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */

					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
   1347 					 * the start over on this object
   1348 					 * since the swhash elt
   1349 					 * may have been freed.
   1350 					 */
   1351 
   1352 					rv = uao_pagein_page(aobj,
   1353 					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
   1354 					if (rv) {
   1355 						return rv;
   1356 					}
   1357 					goto restart;
   1358 				}
   1359 			}
   1360 		}
   1361 	} else {
   1362 		int i;
   1363 
   1364 		for (i = 0; i < aobj->u_pages; i++) {
   1365 			int slot = aobj->u_swslots[i];
   1366 
   1367 			/*
   1368 			 * if the slot isn't in range, skip it
   1369 			 */
   1370 
   1371 			if (slot < startslot || slot >= endslot) {
   1372 				continue;
   1373 			}
   1374 
   1375 			/*
   1376 			 * process the page.
   1377 			 */
   1378 
   1379 			rv = uao_pagein_page(aobj, i);
   1380 			if (rv) {
   1381 				return rv;
   1382 			}
   1383 		}
   1384 	}
   1385 
   1386 	return false;
   1387 }
   1388 
   1389 /*
   1390  * page in a page from an aobj.  used for swap_off.
   1391  * returns true if pagein was aborted due to lack of memory.
   1392  *
   1393  * => aobj must be locked and is returned locked.
   1394  */
   1395 
   1396 static bool
   1397 uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
   1398 {
   1399 	struct vm_page *pg;
   1400 	int rv, npages;
   1401 
   1402 	pg = NULL;
   1403 	npages = 1;
   1404 	/* locked: aobj */
   1405 	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
   1406 	    &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, PGO_SYNCIO);
   1407 	/* unlocked: aobj */
   1408 
   1409 	/*
   1410 	 * relock and finish up.
   1411 	 */
   1412 
   1413 	mutex_enter(&aobj->u_obj.vmobjlock);
   1414 	switch (rv) {
   1415 	case 0:
   1416 		break;
   1417 
   1418 	case EIO:
   1419 	case ERESTART:
   1420 
   1421 		/*
   1422 		 * nothing more to do on errors.
   1423 		 * ERESTART can only mean that the anon was freed,
   1424 		 * so again there's nothing to do.
   1425 		 */
   1426 
   1427 		return false;
   1428 
   1429 	default:
   1430 		return true;
   1431 	}
   1432 
   1433 	/*
   1434 	 * ok, we've got the page now.
   1435 	 * mark it as dirty, clear its swslot and un-busy it.
   1436 	 */
   1437 	uao_dropswap(&aobj->u_obj, pageidx);
   1438 
   1439 	/*
   1440 	 * make sure it's on a page queue.
   1441 	 */
   1442 	mutex_enter(&uvm_pageqlock);
   1443 	if (pg->wire_count == 0)
   1444 		uvm_pageenqueue(pg);
   1445 	mutex_exit(&uvm_pageqlock);
   1446 
   1447 	if (pg->flags & PG_WANTED) {
   1448 		wakeup(pg);
   1449 	}
   1450 	pg->flags &= ~(PG_WANTED|PG_BUSY|PG_CLEAN|PG_FAKE);
   1451 	UVM_PAGE_OWN(pg, NULL);
   1452 
   1453 	return false;
   1454 }
   1455 
   1456 /*
   1457  * uao_dropswap_range: drop swapslots in the range.
   1458  *
   1459  * => aobj must be locked and is returned locked.
   1460  * => start is inclusive.  end is exclusive.
   1461  */
   1462 
   1463 void
   1464 uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
   1465 {
   1466 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
   1467 
   1468 	KASSERT(mutex_owned(&uobj->vmobjlock));
   1469 
   1470 	uao_dropswap_range1(aobj, start, end);
   1471 }
   1472 
   1473 static void
   1474 uao_dropswap_range1(struct uvm_aobj *aobj, voff_t start, voff_t end)
   1475 {
   1476 	int swpgonlydelta = 0;
   1477 
   1478 	if (end == 0) {
   1479 		end = INT64_MAX;
   1480 	}
   1481 
   1482 	if (UAO_USES_SWHASH(aobj)) {
   1483 		int i, hashbuckets = aobj->u_swhashmask + 1;
   1484 		voff_t taghi;
   1485 		voff_t taglo;
   1486 
   1487 		taglo = UAO_SWHASH_ELT_TAG(start);
   1488 		taghi = UAO_SWHASH_ELT_TAG(end);
   1489 
   1490 		for (i = 0; i < hashbuckets; i++) {
   1491 			struct uao_swhash_elt *elt, *next;
   1492 
   1493 			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
   1494 			     elt != NULL;
   1495 			     elt = next) {
   1496 				int startidx, endidx;
   1497 				int j;
   1498 
   1499 				next = LIST_NEXT(elt, list);
   1500 
   1501 				if (elt->tag < taglo || taghi < elt->tag) {
   1502 					continue;
   1503 				}
   1504 
   1505 				if (elt->tag == taglo) {
   1506 					startidx =
   1507 					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
   1508 				} else {
   1509 					startidx = 0;
   1510 				}
   1511 
   1512 				if (elt->tag == taghi) {
   1513 					endidx =
   1514 					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
   1515 				} else {
   1516 					endidx = UAO_SWHASH_CLUSTER_SIZE;
   1517 				}
   1518 
   1519 				for (j = startidx; j < endidx; j++) {
   1520 					int slot = elt->slots[j];
   1521 
   1522 					KASSERT(uvm_pagelookup(&aobj->u_obj,
   1523 					    (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
   1524 					    + j) << PAGE_SHIFT) == NULL);
   1525 					if (slot > 0) {
   1526 						uvm_swap_free(slot, 1);
   1527 						swpgonlydelta++;
   1528 						KASSERT(elt->count > 0);
   1529 						elt->slots[j] = 0;
   1530 						elt->count--;
   1531 					}
   1532 				}
   1533 
   1534 				if (elt->count == 0) {
   1535 					LIST_REMOVE(elt, list);
   1536 					pool_put(&uao_swhash_elt_pool, elt);
   1537 				}
   1538 			}
   1539 		}
   1540 	} else {
   1541 		int i;
   1542 
   1543 		if (aobj->u_pages < end) {
   1544 			end = aobj->u_pages;
   1545 		}
   1546 		for (i = start; i < end; i++) {
   1547 			int slot = aobj->u_swslots[i];
   1548 
   1549 			if (slot > 0) {
   1550 				uvm_swap_free(slot, 1);
   1551 				swpgonlydelta++;
   1552 			}
   1553 		}
   1554 	}
   1555 
   1556 	/*
   1557 	 * adjust the counter of pages only in swap for all
   1558 	 * the swap slots we've freed.
   1559 	 */
   1560 
   1561 	if (swpgonlydelta > 0) {
   1562 		mutex_enter(&uvm_swap_data_lock);
   1563 		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
   1564 		uvmexp.swpgonly -= swpgonlydelta;
   1565 		mutex_exit(&uvm_swap_data_lock);
   1566 	}
   1567 }
   1568 
   1569 #endif /* defined(VMSWAP) */
   1570