/*	$NetBSD: uvm_aobj.c,v 1.92 2007/07/24 19:59:35 ad Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.92 2007/07/24 19:59:35 ad Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.   in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

#define UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX) \
	((PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1))

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)
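
/*
 * a worked example of the macros above: with a cluster shift of 4 the
 * cluster size is 16 pages, so for page index 37
 *
 *	UAO_SWHASH_ELT_TAG(37)          = 37 >> 4 = 2	(pages 32..47)
 *	UAO_SWHASH_ELT_PAGESLOT_IDX(37) = 37 & 15 = 5
 *
 * and UAO_SWHASH_ELT_PAGEIDX_BASE() maps tag 2 back to page index 32.
 */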

/*
 * the swhash hash function
 */

#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */
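
/*
 * concretely: with a cluster size of 16 the threshold is 64 pages, so
 * (assuming 4 KB pages) aobjs larger than 256 KB keep their swap slots
 * in the hash table while smaller ones use the flat per-page array.
 */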

/*
 * the number of buckets in a swhash, with an upper bound
 */

#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(MIN((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))


/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 * NOTE: Pages for this pool must not come from a pageable kernel map!
 */
POOL_INIT(uao_swhash_elt_pool, sizeof(struct uao_swhash_elt), 0, 0, 0,
    "uaoeltpl", NULL, IPL_VM);

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	pgoff_t u_pages;	 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};
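
/*
 * a minimal sketch of the aobj lifecycle from a client's point of view
 * (calls are the exported functions defined below; error handling and
 * mapping code omitted):
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(64 * PAGE_SIZE, 0);	// refs == 1
 *	uao_reference(uobj);			// refs == 2
 *	... fault pages in via the pager's pgo_get ...
 *	uao_detach(uobj);			// refs == 1
 *	uao_detach(uobj);			// refs == 0: pages and
 *						// swap slots are freed
 */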

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */
POOL_INIT(uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0, "aobjpl",
    &pool_allocator_nointr, IPL_NONE);

MALLOC_DEFINE(M_UVMAOBJ, "UVM aobj", "UVM aobj and related structures");

/*
 * local functions
 */

static void	uao_free(struct uvm_aobj *);
static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
		    int *, int, vm_prot_t, int, int);
static int	uao_put(struct uvm_object *, voff_t, voff_t, int);

#if defined(VMSWAP)
static struct uao_swhash_elt *uao_find_swhash_elt
    (struct uvm_aobj *, int, bool);

static bool uao_pagein(struct uvm_aobj *, int, int);
static bool uao_pagein_page(struct uvm_aobj *, int);
static void uao_dropswap_range1(struct uvm_aobj *, voff_t, voff_t);
#endif /* defined(VMSWAP) */

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

struct uvm_pagerops aobj_pager = {
	NULL,			/* init */
	uao_reference,		/* reference */
	uao_detach,		/* detach */
	NULL,			/* fault */
	uao_get,		/* get */
	uao_put,		/* flush */
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static kmutex_t uao_list_lock;

/*
 * functions
 */

/*
 * hash table/array related functions
 */

#if defined(VMSWAP)

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(struct uvm_object *uobj, int pageidx)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		elt = uao_find_swhash_elt(aobj, pageidx, false);
		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */

	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */

int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a non-zero slot.
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		if (slot == 0)
			return(0);

		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add the slot in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
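
/*
 * a caller-side sketch of the slot convention above (this is essentially
 * what uao_dropswap() below does): slot 0 means "no swap backing", a
 * positive slot stays owned by the aobj until overwritten, and the old
 * slot is returned so the caller can release it:
 *
 *	oldslot = uao_set_swslot(uobj, pageidx, 0);
 *	if (oldslot > 0)
 *		uvm_swap_free(oldslot, 1);
 */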

#endif /* defined(VMSWAP) */

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(struct uvm_aobj *aobj)
{
	int swpgonlydelta = 0;

	simple_unlock(&aobj->u_obj.vmobjlock);

#if defined(VMSWAP)
	uao_dropswap_range1(aobj, 0, 0);

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * free the hash table itself.
		 */

		free(aobj->u_swhash, M_UVMAOBJ);
	} else {

		/*
		 * free the array itself.
		 */

		free(aobj->u_swslots, M_UVMAOBJ);
	}
#endif /* defined(VMSWAP) */

	/*
	 * finally free the aobj itself
	 */

	pool_put(&uvm_aobj_pool, aobj);

	/*
	 * adjust the counter of pages only in swap for all
	 * the swap slots we've freed.
	 */

	if (swpgonlydelta > 0) {
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */

struct uvm_object *
uao_create(vsize_t size, int flags)
{
	static struct uvm_aobj kernel_object_store;
	static int kobj_alloced = 0;
	pgoff_t pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;
	int refs;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */

	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
		refs = 0xdeadbeaf; /* XXX: gcc */
	} else {
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		refs = 1;
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about
	 * locking, since we are still booting and should be the only
	 * thread around.
	 */

	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
#if defined(VMSWAP)
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, M_UVMAOBJ, mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = malloc(pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}
#endif /* defined(VMSWAP) */

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
		}
	}

	/*
	 * init aobj fields
	 */

	UVM_OBJ_INIT(&aobj->u_obj, &aobj_pager, refs);

	/*
	 * now that aobj is ready, add it to the global list
	 */

	mutex_enter(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	mutex_exit(&uao_list_lock);
	return(&aobj->u_obj);
}
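
/*
 * a sketch of the two-stage kernel object setup described above the
 * function; the calls and the "kernel_map_size" argument shown here are
 * illustrative placeholders, not the actual boot-time caller:
 *
 *	// early in boot, before swap exists: NOSWAP is set
 *	kernel_object = uao_create(kernel_map_size, UAO_FLAG_KERNOBJ);
 *	...
 *	// once the swap subsystem is initialized: NOSWAP is cleared
 *	(void)uao_create(kernel_map_size, UAO_FLAG_KERNSWAP);
 */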


/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = true;
	LIST_INIT(&uao_list);
	/* XXXSMP should be adaptive but vmobjlock needs to be too */
	mutex_init(&uao_list_lock, MUTEX_SPIN, IPL_NONE);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_reference(struct uvm_object *uobj)
{
	simple_lock(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */

void
uao_reference_locked(struct uvm_object *uobj)
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
		    uobj, uobj->uo_refs,0,0);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_detach(struct uvm_object *uobj)
{
	simple_lock(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}

/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */

void
uao_detach_locked(struct uvm_object *uobj)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		simple_unlock(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;
	if (uobj->uo_refs) {
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */

	mutex_enter(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	mutex_exit(&uao_list_lock);

	/*
	 * free all the pages left in the aobj.  for each page,
	 * when the page is no longer busy (and thus after any disk i/o that
	 * it's involved in is complete), release any swap resources and
	 * free the page itself.
	 */

	uvm_lock_pageq();
	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
		pmap_page_protect(pg, VM_PROT_NONE);
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			uvm_unlock_pageq();
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, false,
			    "uao_det", 0);
			simple_lock(&uobj->vmobjlock);
			uvm_lock_pageq();
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	uvm_unlock_pageq();

	/*
	 * finally, free the aobj itself.
	 */

	uao_free(aobj);
}

/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return 0 unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */
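
/*
 * for example, with a penalty of 4, flushing a 10-page range out of an
 * object that has 50 resident pages compares 50 <= 10 * 4 and chooses
 * per-page hash lookups; with only 30 resident pages the full list walk
 * would win.  (the actual constant is UVM_PAGE_HASH_PENALTY, used in the
 * by_list computation below.)
 */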

static int
uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg, *nextpg, curmp, endmp;
	bool by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);

	curoff = 0;
	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = true;		/* always go by the list */
	} else {
		start = trunc_page(start);
		if (stop == 0) {
			stop = aobj->u_pages << PAGE_SHIFT;
		} else {
			stop = round_page(stop);
		}
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_put: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
	}
	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */

	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		simple_unlock(&uobj->vmobjlock);
		return 0;
	}

	/*
	 * Initialize the marker pages.  See the comment in
	 * genfs_putpages() also.
	 */

	curmp.uobject = uobj;
	curmp.offset = (voff_t)-1;
	curmp.flags = PG_BUSY;
	endmp.uobject = uobj;
	endmp.offset = (voff_t)-1;
	endmp.flags = PG_BUSY;

	/*
	 * now do it.  note: we must update nextpg in the body of the loop or
	 * we will get stuck.  we need to use nextpg if we'll traverse the list
	 * because we may free "pg" before doing the next loop.
	 */

	if (by_list) {
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
		nextpg = TAILQ_FIRST(&uobj->memq);
		uvm_lwp_hold(curlwp);
	} else {
		curoff = start;
		nextpg = NULL;	/* Quell compiler warning */
	}

	uvm_lock_pageq();

	/* locked: both page queues and uobj */
	for (;;) {
		if (by_list) {
			pg = nextpg;
			if (pg == &endmp)
				break;
			nextpg = TAILQ_NEXT(pg, listq);
			if (pg->offset < start || pg->offset >= stop)
				continue;
		} else {
			if (curoff < stop) {
				pg = uvm_pagelookup(uobj, curoff);
				curoff += PAGE_SIZE;
			} else
				break;
			if (pg == NULL)
				continue;
		}
		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */

		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			/* skip the page if it's wired */
			if (pg->wire_count != 0)
				continue;

			/* ...and deactivate the page. */
			pmap_clear_reference(pg);
			uvm_pagedeactivate(pg);
			continue;

		case PGO_FREE:

			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */

			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/*
			 * wait and try again if the page is busy.
			 * otherwise free the swap slot and the page.
			 */

			pmap_page_protect(pg, VM_PROT_NONE);
			if (pg->flags & PG_BUSY) {
				if (by_list) {
					TAILQ_INSERT_BEFORE(pg, &curmp, listq);
				}
				pg->flags |= PG_WANTED;
				uvm_unlock_pageq();
				UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "uao_put", 0);
				simple_lock(&uobj->vmobjlock);
				uvm_lock_pageq();
				if (by_list) {
					nextpg = TAILQ_NEXT(&curmp, listq);
					TAILQ_REMOVE(&uobj->memq, &curmp,
					    listq);
				} else
					curoff -= PAGE_SIZE;
				continue;
			}

			/*
			 * freeing swapslot here is not strictly necessary.
			 * however, leaving it here doesn't save much
			 * because we need to update swap accounting anyway.
			 */

			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			uvm_pagefree(pg);
			continue;
		}
	}
	uvm_unlock_pageq();
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
	}
	simple_unlock(&uobj->vmobjlock);
	if (by_list) {
		uvm_lwp_rele(curlwp);
	}
	return 0;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
#if defined(VMSWAP)
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
#endif /* defined(VMSWAP) */
	voff_t current_offset;
	struct vm_page *ptmp = NULL;	/* Quell compiler warning */
	int lcv, gotpages, maxpages, swslot, pageidx;
	bool done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
		    (struct uvm_aobj *)uobj, offset, flags,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = true;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */
		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;
			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */

			if (ptmp == NULL && uao_find_swslot(&aobj->u_obj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					goto gotpage;
				}
			}

			/*
			 * to be useful must get a non-busy page
			 */

			if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = false;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
gotpage:
			pps[lcv] = ptmp;
			gotpages++;
		}

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);
		*npagesp = gotpages;
		if (done)
			return 0;
		else
			return EBUSY;
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	if ((flags & PGO_SYNCIO) == 0) {
		goto done;
	}

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					simple_lock(&uobj->vmobjlock);
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */

				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */

				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & PG_BUSY) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    false, "uao_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */

			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.   nothing more to do except go to the next page.
		 */

		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */

		swslot = uao_find_swslot(&aobj->u_obj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */

		if (swslot == 0) {

			/*
			 * page hasn't existed before, just zero it.
			 */

			uvm_pagezero(ptmp);
		} else {
#if defined(VMSWAP)
			int error;

			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			     swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */

			simple_unlock(&uobj->vmobjlock);
			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			simple_lock(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */

			if (error != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    error,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
							SWSLOT_BAD);
				if (swslot > 0) {
					uvm_swap_markbad(swslot, 1);
				}

				uvm_lock_pageq();
				uvm_pagefree(ptmp);
				uvm_unlock_pageq();
				simple_unlock(&uobj->vmobjlock);
				return error;
			}
#else /* defined(VMSWAP) */
			panic("%s: pagein", __func__);
#endif /* defined(VMSWAP) */
		}

		if ((access_type & VM_PROT_WRITE) == 0) {
			ptmp->flags |= PG_CLEAN;
			pmap_clear_modify(ptmp);
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;
		pps[lcv] = ptmp;
	}

	/*
	 * finally, unlock object and return.
	 */

done:
	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return 0;
}
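
/*
 * a hedged sketch of how a caller drives uao_get() through the pager
 * ops, per the three cases described above it (the locals and control
 * flow here are illustrative; the real caller is the fault code):
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *	int error;
 *
 *	// fast path: fault structures locked, resident/zero-fill only
 *	error = (*uobj->pgops->pgo_get)(uobj, off, &pg, &npages, 0,
 *	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_LOCKED);
 *	if (error == EBUSY) {
 *		// case 3: drop the fault locks, then redo the get with
 *		// PGO_SYNCIO so the pager may sleep and page in from swap.
 *	}
 */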

#if defined(VMSWAP)

/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(struct uvm_object *uobj, int pageidx)
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * uao_swap_off: page in every page in every aobj that is paged-out to
 * a range of swslots.
 *
 * => nothing should be locked.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uao_swap_off(int startslot, int endslot)
{
	struct uvm_aobj *aobj, *nextaobj;
	bool rv;

	/*
	 * walk the list of all aobjs.
	 */

restart:
	mutex_enter(&uao_list_lock);
	for (aobj = LIST_FIRST(&uao_list);
	     aobj != NULL;
	     aobj = nextaobj) {

		/*
		 * try to get the object lock, start all over if we fail.
		 * most of the time we'll get the aobj lock,
		 * so this should be a rare case.
		 */

		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
			mutex_exit(&uao_list_lock);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */

		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */

		mutex_exit(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */

		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */

		mutex_enter(&uao_list_lock);
		nextaobj = LIST_NEXT(aobj, u_list);
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	mutex_exit(&uao_list_lock);
	return false;
}


/*
 * uao_pagein: page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns true if pagein was aborted due to lack of memory.
 */
static bool
uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
{
	bool rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int buck;

restart:
		for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */

					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */

					rv = uao_pagein_page(aobj,
					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */

			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */

			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return false;
}

/*
 * uao_pagein_page: page in a page from an aobj.  used for swap_off.
 * returns true if pagein was aborted due to lack of memory.
 *
 * => aobj must be locked and is returned locked.
 */

static bool
uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
{
	struct vm_page *pg;
	int rv, npages;

	pg = NULL;
	npages = 1;
	/* locked: aobj */
	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
	    &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, PGO_SYNCIO);
	/* unlocked: aobj */

	/*
	 * relock and finish up.
	 */

	simple_lock(&aobj->u_obj.vmobjlock);
	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return false;

	default:
		return true;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	uao_dropswap(&aobj->u_obj, pageidx);

	/*
	 * make sure it's on a page queue.
	 */
	uvm_lock_pageq();
	if (pg->wire_count == 0)
		uvm_pageenqueue(pg);
	uvm_unlock_pageq();

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	return false;
}

/*
 * uao_dropswap_range: drop swapslots in the range.
 *
 * => aobj must be locked and is returned locked.
 * => start is inclusive.  end is exclusive.
 */
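
/*
 * a worked example of the trimming done below, assuming the cluster size
 * of 16: dropping pages [start, end) = [20, 70) gives taglo = 20 >> 4 = 1
 * and taghi = 70 >> 4 = 4.  an elt with tag 1 is cleared from slot index
 * 20 & 15 = 4 upward, elts with tags 2 and 3 are cleared entirely, and
 * an elt with tag 4 is cleared below slot index 70 & 15 = 6.
 */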

void
uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	LOCK_ASSERT(simple_lock_held(&uobj->vmobjlock));

	uao_dropswap_range1(aobj, start, end);
}

static void
uao_dropswap_range1(struct uvm_aobj *aobj, voff_t start, voff_t end)
{
	int swpgonlydelta = 0;

	if (end == 0) {
		end = INT64_MAX;
	}

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;
		voff_t taghi;
		voff_t taglo;

		taglo = UAO_SWHASH_ELT_TAG(start);
		taghi = UAO_SWHASH_ELT_TAG(end);

		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int startidx, endidx;
				int j;

				next = LIST_NEXT(elt, list);

				if (elt->tag < taglo || taghi < elt->tag) {
					continue;
				}

				if (elt->tag == taglo) {
					startidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
				} else {
					startidx = 0;
				}

				if (elt->tag == taghi) {
					endidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
				} else {
					endidx = UAO_SWHASH_CLUSTER_SIZE;
				}

				for (j = startidx; j < endidx; j++) {
					int slot = elt->slots[j];

					KASSERT(uvm_pagelookup(&aobj->u_obj,
					    (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
					    + j) << PAGE_SHIFT) == NULL);
					if (slot > 0) {
						uvm_swap_free(slot, 1);
						swpgonlydelta++;
						KASSERT(elt->count > 0);
						elt->slots[j] = 0;
						elt->count--;
					}
				}

				if (elt->count == 0) {
					LIST_REMOVE(elt, list);
					pool_put(&uao_swhash_elt_pool, elt);
				}
			}
		}
	} else {
		int i;

		if (aobj->u_pages < end) {
			end = aobj->u_pages;
		}
		for (i = start; i < end; i++) {
			int slot = aobj->u_swslots[i];

			if (slot > 0) {
				uvm_swap_free(slot, 1);
				swpgonlydelta++;
			}
		}
	}

	/*
	 * adjust the counter of pages only in swap for all
	 * the swap slots we've freed.
	 */

	if (swpgonlydelta > 0) {
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}

#endif /* defined(VMSWAP) */