/*	$NetBSD: uvm_aobj.c,v 1.101 2008/06/03 11:51:01 ad Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */
/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq (at) chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.101 2008/06/03 11:51:01 ad Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.   in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

#define UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX) \
	((PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1))

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/*
 * the swhash hash function
 */

#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])
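
/*
 * worked example (illustrative): with UAO_SWHASH_CLUSTER_SHIFT == 4,
 * page index 0x123 has tag 0x12 (0x123 >> 4) and in-cluster slot
 * index 0x3 (0x123 & 0xf); the bucket is the tag masked with
 * u_swhashmask, e.g. 0x12 & 0xf == bucket 2 for a 16-bucket table.
 */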

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */

#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(MIN((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))
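
/*
 * e.g. with the values above, UAO_SWHASH_THRESHOLD is 64 pages, so an
 * aobj of up to 64 pages keeps its slots in a flat 64-entry int array,
 * while a 65-page aobj switches to the hash table with
 * MIN(65 >> 4, 256) == 4 buckets (uao_hashinit below rounds the count
 * up to a power of two).
 */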

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures
 * NOTE: Pages for this pool must not come from a pageable kernel map!
 */
POOL_INIT(uao_swhash_elt_pool, sizeof(struct uao_swhash_elt), 0, 0, 0,
    "uaoeltpl", NULL, IPL_VM);

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	pgoff_t u_pages;	 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};
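
/*
 * since u_obj is the first member, the two pointer types convert
 * freely; the functions below rely on this, e.g.:
 *
 *	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
 */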

/*
 * uvm_aobj_pool: pool of uvm_aobj structures
 */
POOL_INIT(uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0, "aobjpl",
    &pool_allocator_nointr, IPL_NONE);

MALLOC_DEFINE(M_UVMAOBJ, "UVM aobj", "UVM aobj and related structures");

/*
 * local functions
 */

static void	uao_free(struct uvm_aobj *);
static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
		    int *, int, vm_prot_t, int, int);
static int	uao_put(struct uvm_object *, voff_t, voff_t, int);

#if defined(VMSWAP)
static struct uao_swhash_elt *uao_find_swhash_elt
    (struct uvm_aobj *, int, bool);

static bool uao_pagein(struct uvm_aobj *, int, int);
static bool uao_pagein_page(struct uvm_aobj *, int);
static void uao_dropswap_range1(struct uvm_aobj *, voff_t, voff_t);
#endif /* defined(VMSWAP) */

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

const struct uvm_pagerops aobj_pager = {
	.pgo_reference = uao_reference,
	.pgo_detach = uao_detach,
	.pgo_get = uao_get,
	.pgo_put = uao_put,
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static kmutex_t uao_list_lock;

/*
 * functions
 */

/*
 * hash table/array related functions
 */

#if defined(VMSWAP)

/*
 * uao_hashinit: limited version of hashinit() that uses malloc(). XXX
 */
static void *
uao_hashinit(u_int elements, int mflags, u_long *hashmask)
{
	LIST_HEAD(, generic) *elm, *emx;
	u_long hashsize;
	void *p;

	for (hashsize = 1; hashsize < elements; hashsize <<= 1)
		continue;
	if ((p = malloc(hashsize * sizeof(*elm), M_UVMAOBJ, mflags)) == NULL)
		return (NULL);
	for (elm = p, emx = elm + hashsize; elm < emx; elm++)
		LIST_INIT(elm);
	*hashmask = hashsize - 1;

	return (p);
}
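
/*
 * note (illustrative): the loop above rounds the bucket count up to a
 * power of two, so the returned mask is always hashsize - 1; asking
 * for 6 buckets, for example, allocates 8 list heads and sets
 * *hashmask to 7.
 */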

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(struct uvm_object *uobj, int pageidx)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		elt = uao_find_swhash_elt(aobj, pageidx, false);
		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */

	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */

int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	/*
	 * if noswap flag is set, then we can't set a non-zero slot.
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		if (slot == 0)
			return(0);

		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
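
/*
 * usage sketch (illustrative only, not a complete pager path): a
 * caller that has just written page "pg" to swap slot "slot" would
 * record it, and must be prepared for the -1 out-of-memory return:
 *
 *	oldslot = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
 *	if (oldslot == -1)
 *		... could not record the slot: keep the page dirty ...
 */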

#endif /* defined(VMSWAP) */

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(struct uvm_aobj *aobj)
{
	int swpgonlydelta = 0;

#if defined(VMSWAP)
	uao_dropswap_range1(aobj, 0, 0);
#endif /* defined(VMSWAP) */

	mutex_exit(&aobj->u_obj.vmobjlock);

#if defined(VMSWAP)
	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * free the hash table itself.
		 */

		free(aobj->u_swhash, M_UVMAOBJ);
	} else {

		/*
		 * free the array itself.
		 */

		free(aobj->u_swslots, M_UVMAOBJ);
	}
#endif /* defined(VMSWAP) */

	/*
	 * finally free the aobj itself
	 */

	UVM_OBJ_DESTROY(&aobj->u_obj);
	pool_put(&uvm_aobj_pool, aobj);

	/*
	 * adjust the counter of pages only in swap for all
	 * the swap slots we've freed.
	 */

	if (swpgonlydelta > 0) {
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */

struct uvm_object *
uao_create(vsize_t size, int flags)
{
	static struct uvm_aobj kernel_object_store;
	static int kobj_alloced = 0;
	pgoff_t pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;
	int refs;

	/*
	 * malloc a new aobj unless we are asked for the kernel object
	 */

	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
		refs = 0xdeadbeaf; /* XXX: gcc */
	} else {
		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		refs = 1;
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about
	 * locking; since we are still booting, we should be the only
	 * thread around.
	 */

	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
#if defined(VMSWAP)
		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
		    M_NOWAIT : M_WAITOK;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = uao_hashinit(UAO_SWHASH_BUCKETS(aobj),
			    mflags, &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = malloc(pages * sizeof(int),
			    M_UVMAOBJ, mflags);
			if (aobj->u_swslots == NULL)
				panic("uao_create: malloc swslots failed");
			memset(aobj->u_swslots, 0, pages * sizeof(int));
		}
#endif /* defined(VMSWAP) */

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
		}
	}

	/*
	 * init aobj fields
	 */

	UVM_OBJ_INIT(&aobj->u_obj, &aobj_pager, refs);

	/*
	 * now that aobj is ready, add it to the global list
	 */

	mutex_enter(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	mutex_exit(&uao_list_lock);
	return(&aobj->u_obj);
}
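
/*
 * usage sketch (illustrative; the exact uvm_map() arguments here are
 * an assumption, and error handling is omitted): a typical consumer
 * creates an aobj and hands its reference to a map entry, detaching
 * again if the mapping fails:
 *
 *	struct uvm_object *uobj = uao_create(size, 0);
 *
 *	if (uvm_map(kernel_map, &va, size, uobj, 0, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
 *	    UVM_ADV_RANDOM, 0)) != 0)
 *		uao_detach(uobj);
 */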

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = true;
	LIST_INIT(&uao_list);
	mutex_init(&uao_list_lock, MUTEX_DEFAULT, IPL_NONE);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_reference(struct uvm_object *uobj)
{

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	mutex_enter(&uobj->vmobjlock);
	uao_reference_locked(uobj);
	mutex_exit(&uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */

void
uao_reference_locked(struct uvm_object *uobj)
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
		    uobj, uobj->uo_refs,0,0);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_detach(struct uvm_object *uobj)
{

	/*
	 * detaching from kernel_object is a noop.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	mutex_enter(&uobj->vmobjlock);
	uao_detach_locked(uobj);
}

/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */

void
uao_detach_locked(struct uvm_object *uobj)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		mutex_exit(&uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;
	if (uobj->uo_refs) {
		mutex_exit(&uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */

	mutex_enter(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	mutex_exit(&uao_list_lock);

	/*
	 * free all the pages left in the aobj.  for each page,
	 * when the page is no longer busy (and thus after any disk i/o that
	 * it's involved in is complete), release any swap resources and
	 * free the page itself.
	 */

	mutex_enter(&uvm_pageqlock);
	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
		pmap_page_protect(pg, VM_PROT_NONE);
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			mutex_exit(&uvm_pageqlock);
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, false,
			    "uao_det", 0);
			mutex_enter(&uobj->vmobjlock);
			mutex_enter(&uvm_pageqlock);
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	mutex_exit(&uvm_pageqlock);

	/*
	 * finally, free the aobj itself.
	 */

	uao_free(aobj);
}

/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return 0 unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty which we define below.
 */
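
/*
 * e.g. (illustrative, assuming UVM_PAGE_HASH_PENALTY is 4, its
 * historical value in uvm_page.h): a flush covering 8 pages goes by
 * list only if the object has at most 32 resident pages; a larger
 * object is walked by offset instead.
 */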

static int
uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg, *nextpg, curmp, endmp;
	bool by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);

	KASSERT(mutex_owned(&uobj->vmobjlock));

	curoff = 0;
	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = true;		/* always go by the list */
	} else {
		start = trunc_page(start);
		if (stop == 0) {
			stop = aobj->u_pages << PAGE_SHIFT;
		} else {
			stop = round_page(stop);
		}
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_flush: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
	}
	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */

	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}

	/*
	 * Initialize the marker pages.  See the comment in
	 * genfs_putpages() also.
	 */

	curmp.uobject = uobj;
	curmp.offset = (voff_t)-1;
	curmp.flags = PG_BUSY;
	endmp.uobject = uobj;
	endmp.offset = (voff_t)-1;
	endmp.flags = PG_BUSY;

	/*
	 * now do it.  note: we must update nextpg in the body of loop or we
	 * will get stuck.  we need to use nextpg if we'll traverse the list
	 * because we may free "pg" before doing the next loop.
	 */

	if (by_list) {
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
		nextpg = TAILQ_FIRST(&uobj->memq);
		uvm_lwp_hold(curlwp);
	} else {
		curoff = start;
		nextpg = NULL;	/* Quell compiler warning */
	}

	/* locked: uobj */
	for (;;) {
		if (by_list) {
			pg = nextpg;
			if (pg == &endmp)
				break;
			nextpg = TAILQ_NEXT(pg, listq);
			if (pg->offset < start || pg->offset >= stop)
				continue;
		} else {
			if (curoff < stop) {
				pg = uvm_pagelookup(uobj, curoff);
				curoff += PAGE_SIZE;
			} else
				break;
			if (pg == NULL)
				continue;
		}

		/*
		 * wait and try again if the page is busy.
		 */

		if (pg->flags & PG_BUSY) {
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq);
			}
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "uao_put", 0);
			mutex_enter(&uobj->vmobjlock);
			if (by_list) {
				nextpg = TAILQ_NEXT(&curmp, listq);
				TAILQ_REMOVE(&uobj->memq, &curmp,
				    listq);
			} else
				curoff -= PAGE_SIZE;
			continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */

		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			mutex_enter(&uvm_pageqlock);
			/* skip the page if it's wired */
			if (pg->wire_count == 0) {
				uvm_pagedeactivate(pg);
			}
			mutex_exit(&uvm_pageqlock);
			break;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */

			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/*
			 * free the swap slot and the page.
			 */

			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * freeing swapslot here is not strictly necessary.
			 * however, leaving it here doesn't save much
			 * because we need to update swap accounting anyway.
			 */

			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
			break;

		default:
			panic("%s: impossible", __func__);
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
		uvm_lwp_rele(curlwp);
	}
	mutex_exit(&uobj->vmobjlock);
	return 0;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
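
/*
 * usage sketch (illustrative): a fault handler typically tries the
 * locked fast path first, then retries with blocking I/O allowed:
 *
 *	npages = 1;
 *	error = (*uobj->pgops->pgo_get)(uobj, off, &pg, &npages, 0,
 *	    prot, advice, PGO_LOCKED);
 *	if (error == EBUSY) {
 *		... drop the fault-time locks, relock uobj ...
 *		error = (*uobj->pgops->pgo_get)(uobj, off, &pg, &npages,
 *		    0, prot, advice, PGO_SYNCIO);
 *	}
 */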

static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
#if defined(VMSWAP)
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
#endif /* defined(VMSWAP) */
	voff_t current_offset;
	struct vm_page *ptmp = NULL;	/* Quell compiler warning */
	int lcv, gotpages, maxpages, swslot, pageidx;
	bool done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
		    (struct uvm_aobj *)uobj, offset, flags,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = true;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */
		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;
			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */

			if (ptmp == NULL && uao_find_swslot(&aobj->u_obj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					goto gotpage;
				}
			}

			/*
			 * to be useful must get a non-busy page
			 */

			if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0) {
					/* need to do a wait or I/O! */
					done = false;
				}
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
gotpage:
			pps[lcv] = ptmp;
			gotpages++;
		}

		/*
		 * step 1b: now we've either done everything needed or we need
		 * to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);
		*npagesp = gotpages;
		if (done)
			return 0;
		else
			return EBUSY;
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	if ((flags & PGO_SYNCIO) == 0) {
		goto done;
	}

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					mutex_exit(&uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					mutex_enter(&uobj->vmobjlock);
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */

				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */

				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & PG_BUSY) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
				    false, "uao_get", 0);
				mutex_enter(&uobj->vmobjlock);
				continue;
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */

			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.   nothing more to do except go to the next page.
		 */

		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */

		swslot = uao_find_swslot(&aobj->u_obj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */

		if (swslot == 0) {

			/*
			 * page hasn't existed before, just zero it.
			 */

			uvm_pagezero(ptmp);
		} else {
#if defined(VMSWAP)
			int error;

			UVMHIST_LOG(pdhist, "pagein from swslot %d",
			     swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */

			mutex_exit(&uobj->vmobjlock);
			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			mutex_enter(&uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */

			if (error != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%d)",
				    error,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
							SWSLOT_BAD);
				if (swslot > 0) {
					uvm_swap_markbad(swslot, 1);
				}

				mutex_enter(&uvm_pageqlock);
				uvm_pagefree(ptmp);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(&uobj->vmobjlock);
				return error;
			}
#else /* defined(VMSWAP) */
			panic("%s: pagein", __func__);
#endif /* defined(VMSWAP) */
		}

		if ((access_type & VM_PROT_WRITE) == 0) {
			ptmp->flags |= PG_CLEAN;
			pmap_clear_modify(ptmp);
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the callers job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */

		ptmp->flags &= ~PG_FAKE;
		pps[lcv] = ptmp;
	}

	/*
	 * finally, unlock object and return.
	 */

done:
	mutex_exit(&uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return 0;
}

#if defined(VMSWAP)

/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(struct uvm_object *uobj, int pageidx)
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uao_swap_off(int startslot, int endslot)
{
	struct uvm_aobj *aobj, *nextaobj;
	bool rv;

	/*
	 * walk the list of all aobjs.
	 */

restart:
	mutex_enter(&uao_list_lock);
	for (aobj = LIST_FIRST(&uao_list);
	     aobj != NULL;
	     aobj = nextaobj) {

		/*
		 * try to get the object lock, start all over if we fail.
		 * most of the time we'll get the aobj lock,
		 * so this should be a rare case.
		 */

		if (!mutex_tryenter(&aobj->u_obj.vmobjlock)) {
			mutex_exit(&uao_list_lock);
			/* XXX Better than yielding but inadequate. */
			kpause("livelock", false, 1, NULL);
			goto restart;
		}

		/*
		 * add a ref to the aobj so it doesn't disappear
		 * while we're working.
		 */

		uao_reference_locked(&aobj->u_obj);

		/*
		 * now it's safe to unlock the uao list.
		 */

		mutex_exit(&uao_list_lock);

		/*
		 * page in any pages in the swslot range.
		 * if there's an error, abort and return the error.
		 */

		rv = uao_pagein(aobj, startslot, endslot);
		if (rv) {
			uao_detach_locked(&aobj->u_obj);
			return rv;
		}

		/*
		 * we're done with this aobj.
		 * relock the list and drop our ref on the aobj.
		 */

		mutex_enter(&uao_list_lock);
		nextaobj = LIST_NEXT(aobj, u_list);
		uao_detach_locked(&aobj->u_obj);
	}

	/*
	 * done with traversal, unlock the list
	 */
	mutex_exit(&uao_list_lock);
	return false;
}


/*
 * page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns true if pagein was aborted due to lack of memory.
 */
static bool
uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
{
	bool rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int buck;

restart:
		for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */

					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */

					rv = uao_pagein_page(aobj,
					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */

			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */

			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return false;
}

/*
 * page in a page from an aobj.  used for swap_off.
 * returns true if pagein was aborted due to lack of memory.
 *
 * => aobj must be locked and is returned locked.
 */

static bool
uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
{
	struct vm_page *pg;
	int rv, npages;

	pg = NULL;
	npages = 1;
	/* locked: aobj */
	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
	    &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, PGO_SYNCIO);
	/* unlocked: aobj */

	/*
	 * relock and finish up.
	 */

	mutex_enter(&aobj->u_obj.vmobjlock);
	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return false;

	default:
		return true;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	uao_dropswap(&aobj->u_obj, pageidx);

	/*
	 * make sure it's on a page queue.
	 */
	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count == 0)
		uvm_pageenqueue(pg);
	mutex_exit(&uvm_pageqlock);

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY|PG_CLEAN|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	return false;
}

/*
 * uao_dropswap_range: drop swapslots in the range.
 *
 * => aobj must be locked and is returned locked.
 * => start is inclusive.  end is exclusive.
 */

void
uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	KASSERT(mutex_owned(&uobj->vmobjlock));

	uao_dropswap_range1(aobj, start, end);
}

static void
uao_dropswap_range1(struct uvm_aobj *aobj, voff_t start, voff_t end)
{
	int swpgonlydelta = 0;

	if (end == 0) {
		end = INT64_MAX;
	}

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;
		voff_t taghi;
		voff_t taglo;

		taglo = UAO_SWHASH_ELT_TAG(start);
		taghi = UAO_SWHASH_ELT_TAG(end);

		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int startidx, endidx;
				int j;

				next = LIST_NEXT(elt, list);

				if (elt->tag < taglo || taghi < elt->tag) {
					continue;
				}

				if (elt->tag == taglo) {
					startidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
				} else {
					startidx = 0;
				}

				if (elt->tag == taghi) {
					endidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
				} else {
					endidx = UAO_SWHASH_CLUSTER_SIZE;
				}

				for (j = startidx; j < endidx; j++) {
					int slot = elt->slots[j];

					KASSERT(uvm_pagelookup(&aobj->u_obj,
					    (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
					    + j) << PAGE_SHIFT) == NULL);
					if (slot > 0) {
						uvm_swap_free(slot, 1);
						swpgonlydelta++;
						KASSERT(elt->count > 0);
						elt->slots[j] = 0;
						elt->count--;
					}
				}

				if (elt->count == 0) {
					LIST_REMOVE(elt, list);
					pool_put(&uao_swhash_elt_pool, elt);
				}
			}
		}
	} else {
		int i;

		if (aobj->u_pages < end) {
			end = aobj->u_pages;
		}
		for (i = start; i < end; i++) {
			int slot = aobj->u_swslots[i];

			if (slot > 0) {
				uvm_swap_free(slot, 1);
				swpgonlydelta++;
			}
		}
	}

	/*
	 * adjust the counter of pages only in swap for all
	 * the swap slots we've freed.
	 */

	if (swpgonlydelta > 0) {
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}

#endif /* defined(VMSWAP) */
   1615