/*	$NetBSD: uvm_aobj.c,v 1.134 2020/01/15 17:55:45 ad Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */

/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.134 2020/01/15 17:55:45 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page_array.h>

/*
 * An anonymous UVM object (aobj) manages anonymous memory.  In addition to
 * keeping the list of resident pages, it may also keep a list of allocated
 * swap blocks.  Depending on the size of the object, this list is either
 * stored in an array (small objects) or in a hash table (large objects).
 *
 * Lock order
 *
 *	uao_list_lock ->
 *		uvm_object::vmobjlock
 */

/*
 * Note: for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
 */

#define	UAO_SWHASH_CLUSTER_SHIFT	4
#define	UAO_SWHASH_CLUSTER_SIZE		(1 << UAO_SWHASH_CLUSTER_SHIFT)

/* Get the "tag" for this page index. */
#define	UAO_SWHASH_ELT_TAG(idx)		((idx) >> UAO_SWHASH_CLUSTER_SHIFT)
#define UAO_SWHASH_ELT_PAGESLOT_IDX(idx) \
    ((idx) & (UAO_SWHASH_CLUSTER_SIZE - 1))

/* Given an ELT and a page index, find the swap slot. */
#define	UAO_SWHASH_ELT_PAGESLOT(elt, idx) \
    ((elt)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(idx)])

/* Given an ELT, return its pageidx base. */
#define	UAO_SWHASH_ELT_PAGEIDX_BASE(elt) \
    ((elt)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/* The hash function. */
#define	UAO_SWHASH_HASH(aobj, idx) \
    (&(aobj)->u_swhash[(((idx) >> UAO_SWHASH_CLUSTER_SHIFT) \
    & (aobj)->u_swhashmask)])

/*
 * The threshold which determines whether we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */
#define	UAO_SWHASH_THRESHOLD		(UAO_SWHASH_CLUSTER_SIZE * 4)
#define	UAO_USES_SWHASH(aobj) \
    ((aobj)->u_pages > UAO_SWHASH_THRESHOLD)

/* The number of buckets in a hash, with an upper bound. */
#define	UAO_SWHASH_MAXBUCKETS		256
#define	UAO_SWHASH_BUCKETS(aobj) \
    (MIN((aobj)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
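
/*
 * Worked example (the numbers follow directly from the definitions
 * above, with UAO_SWHASH_CLUSTER_SHIFT == 4): page index 0x123 has
 * tag 0x123 >> 4 == 0x12 and in-cluster slot index 0x123 & 0xf == 3.
 * UAO_SWHASH_THRESHOLD is 16 * 4 == 64 pages, so only objects larger
 * than 64 pages use the hash; a 1024-page aobj gets
 * MIN(1024 >> 4, 256) == 64 buckets.
 */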

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};
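
/*
 * Each element covers one cluster of UAO_SWHASH_CLUSTER_SIZE (16)
 * pages.  "count" is the number of non-zero slots; once it drops to
 * zero, uao_set_swslot() frees the element back to its pool.
 */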

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures.
 * Note: pages for this pool must not come from a pageable kernel map.
 */
static struct pool	uao_swhash_elt_pool	__cacheline_aligned;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, which allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, #pages, #refs */
	pgoff_t u_pages;	 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
	int u_freelist;		  /* freelist to allocate pages from */
};

static void	uao_free(struct uvm_aobj *);
static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
		    int *, int, vm_prot_t, int, int);
static int	uao_put(struct uvm_object *, voff_t, voff_t, int);

#if defined(VMSWAP)
static struct uao_swhash_elt *uao_find_swhash_elt
    (struct uvm_aobj *, int, bool);

static bool uao_pagein(struct uvm_aobj *, int, int);
static bool uao_pagein_page(struct uvm_aobj *, int);
#endif /* defined(VMSWAP) */

static struct vm_page	*uao_pagealloc(struct uvm_object *, voff_t, int);

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

const struct uvm_pagerops aobj_pager = {
	.pgo_reference = uao_reference,
	.pgo_detach = uao_detach,
	.pgo_get = uao_get,
	.pgo_put = uao_put,
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list	__cacheline_aligned;
static kmutex_t		uao_list_lock		__cacheline_aligned;

/*
 * hash table/array related functions
 */

#if defined(VMSWAP)

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(struct uvm_object *uobj, int pageidx)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return 0;

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		elt = uao_find_swhash_elt(aobj, pageidx, false);
		return elt ? UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) : 0;
	}

	/*
	 * otherwise, look in the array
	 */

	return aobj->u_swslots[pageidx];
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */
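
/*
 * For example (this is exactly the pattern uao_dropswap() below uses):
 * uao_set_swslot(uobj, pageidx, 0) releases the bookkeeping for that
 * page and returns the old slot, which the caller then hands to
 * uvm_swap_free() to release the swap space itself.
 */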

int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %#jx pageidx %jd slot %jd",
	    (uintptr_t)aobj, pageidx, slot, 0);

	KASSERT(mutex_owned(uobj->vmobjlock) || uobj->uo_refs == 0);

	/*
	 * if noswap flag is set, then we can't set a non-zero slot.
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		KASSERTMSG(slot == 0, "uao_set_swslot: no swap object");
		return 0;
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return oldslot;
}

#endif /* defined(VMSWAP) */

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(struct uvm_aobj *aobj)
{
	struct uvm_object *uobj = &aobj->u_obj;

	KASSERT(mutex_owned(uobj->vmobjlock));
	uao_dropswap_range(uobj, 0, 0);
	mutex_exit(uobj->vmobjlock);

#if defined(VMSWAP)
	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * free the hash table itself.
		 */

		hashdone(aobj->u_swhash, HASH_LIST, aobj->u_swhashmask);
	} else {

		/*
		 * free the array itself.
		 */

		kmem_free(aobj->u_swslots, aobj->u_pages * sizeof(int));
	}
#endif /* defined(VMSWAP) */

	/*
	 * finally free the aobj itself
	 */

	uvm_obj_destroy(uobj, true);
	kmem_free(aobj, sizeof(struct uvm_aobj));
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */
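
/*
 * Typical use, as a sketch rather than a quote from any one caller:
 *
 *	struct uvm_object *uobj = uao_create(size, 0);
 *
 * The object comes back with a single reference held; drop it with
 * uao_detach(uobj) when the anonymous memory is no longer needed.
 */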

struct uvm_object *
uao_create(voff_t size, int flags)
{
	static struct uvm_aobj kernel_object_store;
	static kmutex_t kernel_object_lock __cacheline_aligned;
	static int kobj_alloced __diagused = 0;
	pgoff_t pages = round_page((uint64_t)size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;
	int refs;

	/*
	 * Allocate a new aobj, unless kernel object is requested.
	 */

	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
		refs = 0xdeadbeaf; /* XXX: gcc */
	} else {
		aobj = kmem_alloc(sizeof(struct uvm_aobj), KM_SLEEP);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		refs = 1;
	}

	/*
	 * no freelist by default
	 */

	aobj->u_freelist = VM_NFREELIST;

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about
	 * locking, since we are still booting and should be the only
	 * thread around.
	 */

	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
#if defined(VMSWAP)
		const int kernswap = (flags & UAO_FLAG_KERNSWAP) != 0;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, kernswap ? false : true,
			    &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = kmem_zalloc(pages * sizeof(int),
			    kernswap ? KM_NOSLEEP : KM_SLEEP);
			if (aobj->u_swslots == NULL)
				panic("uao_create: swslots allocation failed");
		}
#endif /* defined(VMSWAP) */

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return &aobj->u_obj;
		}
	}

	/*
	 * Initialise UVM object.
	 */

	const bool kernobj = (flags & UAO_FLAG_KERNOBJ) != 0;
	uvm_obj_init(&aobj->u_obj, &aobj_pager, !kernobj, refs);
	if (__predict_false(kernobj)) {
		/* Initialisation only once, for UAO_FLAG_KERNOBJ. */
		mutex_init(&kernel_object_lock, MUTEX_DEFAULT, IPL_NONE);
		uvm_obj_setlock(&aobj->u_obj, &kernel_object_lock);
	}

	/*
	 * now that aobj is ready, add it to the global list
	 */

	mutex_enter(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	mutex_exit(&uao_list_lock);
	return &aobj->u_obj;
}

/*
 * uao_set_pgfl: allocate pages only from the specified freelist.
 *
 * => must be called before any pages are allocated for the object.
 * => reset by setting it to VM_NFREELIST, meaning any freelist.
 */

void
uao_set_pgfl(struct uvm_object *uobj, int freelist)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	KASSERTMSG((0 <= freelist), "invalid freelist %d", freelist);
	KASSERTMSG((freelist <= VM_NFREELIST), "invalid freelist %d",
	    freelist);

	aobj->u_freelist = freelist;
}
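
/*
 * A hypothetical example (freelist constants are machine-dependent,
 * e.g. VM_FREELIST_FIRST16 on x86): a caller that must keep an
 * object's pages in low memory would do
 *
 *	uobj = uao_create(size, 0);
 *	uao_set_pgfl(uobj, VM_FREELIST_FIRST16);
 *
 * before faulting in or allocating any page of the object.
 */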

/*
 * uao_pagealloc: allocate a page for aobj.
 */

static inline struct vm_page *
uao_pagealloc(struct uvm_object *uobj, voff_t offset, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	if (__predict_true(aobj->u_freelist == VM_NFREELIST))
		return uvm_pagealloc(uobj, offset, NULL, flags);
	else
		return uvm_pagealloc_strat(uobj, offset, NULL, flags,
		    UVM_PGA_STRAT_ONLY, aobj->u_freelist);
}

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = true;
	LIST_INIT(&uao_list);
	mutex_init(&uao_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", NULL, IPL_VM);
}

/*
 * uao_reference: hold a reference to an anonymous UVM object.
 */
void
uao_reference(struct uvm_object *uobj)
{
	/* Kernel object is persistent. */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		return;
	}
	atomic_inc_uint(&uobj->uo_refs);
}

/*
 * uao_detach: drop a reference to an anonymous UVM object.
 */
void
uao_detach(struct uvm_object *uobj)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uvm_page_array a;
	struct vm_page *pg;

	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * Detaching from kernel object is a NOP.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	/*
	 * Drop the reference.  If it was the last one, destroy the object.
	 */

	KASSERT(uobj->uo_refs > 0);
	UVMHIST_LOG(maphist,"  (uobj=%#jx)  ref=%jd",
	    (uintptr_t)uobj, uobj->uo_refs, 0, 0);
	if (atomic_dec_uint_nv(&uobj->uo_refs) > 0) {
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * Remove the aobj from the global list.
	 */

	mutex_enter(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	mutex_exit(&uao_list_lock);

	/*
	 * Free all the pages left in the aobj.  For each page, when the
	 * page is no longer busy (and thus after any disk I/O that it is
	 * involved in is complete), release any swap resources and free
	 * the page itself.
	 */
	uvm_page_array_init(&a);
	mutex_enter(uobj->vmobjlock);
	while ((pg = uvm_page_array_fill_and_peek(&a, uobj, 0, 0, 0))
	    != NULL) {
		uvm_page_array_advance(&a);
		pmap_page_protect(pg, VM_PROT_NONE);
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, false,
			    "uao_det", 0);
			uvm_page_array_clear(&a);
			mutex_enter(uobj->vmobjlock);
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	uvm_page_array_fini(&a);

	/*
	 * Finally, free the anonymous UVM object itself.
	 */

	uao_free(aobj);
}

/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => we return 0 unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 */

static int
uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uvm_page_array a;
	struct vm_page *pg;
	voff_t curoff;
	UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);

	KASSERT(mutex_owned(uobj->vmobjlock));

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
	} else {
		start = trunc_page(start);
		if (stop == 0) {
			stop = aobj->u_pages << PAGE_SHIFT;
		} else {
			stop = round_page(stop);
		}
		if (stop > (uint64_t)(aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_put: strange, got an out of range "
			    "flush 0x%jx > 0x%jx (fixed)\n",
			    (uintmax_t)stop,
			    (uintmax_t)(aobj->u_pages << PAGE_SHIFT));
			stop = aobj->u_pages << PAGE_SHIFT;
		}
	}
	UVMHIST_LOG(maphist,
	    " flush start=0x%jx, stop=0x%jx, flags=0x%jx",
	    start, stop, flags, 0);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */

	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		mutex_exit(uobj->vmobjlock);
		return 0;
	}

	/* locked: uobj */
	uvm_page_array_init(&a);
	curoff = start;
	while ((pg = uvm_page_array_fill_and_peek(&a, uobj, curoff, 0, 0)) !=
	    NULL) {
		if (pg->offset >= stop) {
			break;
		}

		/*
		 * wait and try again if the page is busy.
		 */

		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
			    "uao_put", 0);
			uvm_page_array_clear(&a);
			mutex_enter(uobj->vmobjlock);
			continue;
		}
		uvm_page_array_advance(&a);
		curoff = pg->offset + PAGE_SIZE;

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */

		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			uvm_pagelock(pg);
			uvm_pagedeactivate(pg);
			uvm_pageunlock(pg);
			break;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */

			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/*
			 * free the swap slot and the page.
			 */

			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * freeing swapslot here is not strictly necessary.
			 * however, leaving it here doesn't save much
			 * because we need to update swap accounting anyway.
			 */

			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			uvm_pagefree(pg);
			break;

		default:
			panic("%s: impossible", __func__);
		}
	}
	mutex_exit(uobj->vmobjlock);
	uvm_page_array_fini(&a);
	return 0;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */
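
/*
 * For example: a call with offset == 0x2000, *npagesp == 3 and
 * centeridx == 1 covers the pages at 0x2000..0x4fff, with pps[1]
 * (offset 0x3000) being the page the fault actually needs; in the
 * PGO_LOCKED case, neighbouring pages that would require waiting or
 * I/O are simply skipped.
 */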

static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
	voff_t current_offset;
	struct vm_page *ptmp = NULL;	/* Quell compiler warning */
	int lcv, gotpages, maxpages, swslot, pageidx;
	bool done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%#jx offset=%jd, flags=%jd",
		    (uintptr_t)uobj, offset, flags,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = true;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */
		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;
			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */

			if (ptmp == NULL && uao_find_swslot(uobj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uao_pagealloc(uobj, current_offset,
				    UVM_FLAG_COLORMATCH|UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_FAKE);
					uvm_pagemarkdirty(ptmp,
					    UVM_PAGE_STATUS_UNKNOWN);
					goto gotpage;
				}
			}

			/*
			 * to be useful must get a non-busy page
			 */

			if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = false;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */
			KASSERT(uvm_pagegetdirty(ptmp) !=
			    UVM_PAGE_STATUS_CLEAN);

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
gotpage:
			pps[lcv] = ptmp;
			gotpages++;
		}

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%jd)", done, 0,0,0);
		*npagesp = gotpages;
		if (done)
			return 0;
		else
			return EBUSY;
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	if ((flags & PGO_SYNCIO) == 0) {
		goto done;
	}

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uao_pagealloc(uobj, current_offset, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					mutex_exit(uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					mutex_enter(uobj->vmobjlock);
					continue;
				}

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */

				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & PG_BUSY) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%jx\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, uobj->vmobjlock,
				    false, "uao_get", 0);
				mutex_enter(uobj->vmobjlock);
				continue;
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */

			KASSERT(uvm_pagegetdirty(ptmp) !=
			    UVM_PAGE_STATUS_CLEAN);
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
		 * point to it.   nothing more to do except go to the next page.
		 */

		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o", either reading from swap or zeroing.
		 */

		swslot = uao_find_swslot(uobj, pageidx);

		/*
		 * just zero the page if there's nothing in swap.
		 */

		if (swslot == 0) {

			/*
			 * page hasn't existed before, just zero it.
			 */

			uvm_pagezero(ptmp);
		} else {
#if defined(VMSWAP)
			int error;

			UVMHIST_LOG(pdhist, "pagein from swslot %jd",
			     swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */

			mutex_exit(uobj->vmobjlock);
			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			mutex_enter(uobj->vmobjlock);

			/*
			 * I/O done.  check for errors.
			 */

			if (error != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%jd)",
				    error,0,0,0);
				if (ptmp->flags & PG_WANTED)
					wakeup(ptmp);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				swslot = uao_set_swslot(uobj, pageidx,
				    SWSLOT_BAD);
				if (swslot > 0) {
					uvm_swap_markbad(swslot, 1);
				}

				uvm_pagefree(ptmp);
				mutex_exit(uobj->vmobjlock);
				return error;
			}
#else /* defined(VMSWAP) */
			panic("%s: pagein", __func__);
#endif /* defined(VMSWAP) */
		}

		/*
		 * note that we will allow the page to be writably-mapped
		 * (!PG_RDONLY) regardless of access_type.
		 */
		uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_UNKNOWN);

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */
		KASSERT(uvm_pagegetdirty(ptmp) != UVM_PAGE_STATUS_CLEAN);
		KASSERT((ptmp->flags & PG_FAKE) != 0);
		ptmp->flags &= ~PG_FAKE;
		pps[lcv] = ptmp;
	}

	/*
	 * finally, unlock object and return.
	 */

done:
	mutex_exit(uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return 0;
}

#if defined(VMSWAP)

/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
 */

void
uao_dropswap(struct uvm_object *uobj, int pageidx)
{
	int slot;

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * uao_swap_off: page in every page in every aobj that is paged-out to
 * a range of swslots.
 *
 * => nothing should be locked.
 * => returns true if pagein was aborted due to lack of memory.
 */
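
/*
 * This is the hook the swap subsystem uses when swap space is being
 * disabled (e.g. a swap device is removed): every slot in the
 * half-open range [startslot, endslot) must be brought back into RAM
 * before the space disappears.
 */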

bool
uao_swap_off(int startslot, int endslot)
{
	struct uvm_aobj *aobj;

	/*
	 * Walk the list of all anonymous UVM objects.  Grab the first.
	 */
	mutex_enter(&uao_list_lock);
	if ((aobj = LIST_FIRST(&uao_list)) == NULL) {
		mutex_exit(&uao_list_lock);
		return false;
	}
	uao_reference(&aobj->u_obj);

	do {
		struct uvm_aobj *nextaobj;
		bool rv;

		/*
		 * Prefetch the next object and immediately hold a reference
		 * on it, so neither the current nor the next entry could
		 * disappear while we are iterating.
		 */
		if ((nextaobj = LIST_NEXT(aobj, u_list)) != NULL) {
			uao_reference(&nextaobj->u_obj);
		}
		mutex_exit(&uao_list_lock);

		/*
		 * Page in all pages in the swap slot range.
		 */
		mutex_enter(aobj->u_obj.vmobjlock);
		rv = uao_pagein(aobj, startslot, endslot);
		mutex_exit(aobj->u_obj.vmobjlock);

		/* Drop the reference of the current object. */
		uao_detach(&aobj->u_obj);
		if (rv) {
			if (nextaobj) {
				uao_detach(&nextaobj->u_obj);
			}
			return rv;
		}

		aobj = nextaobj;
		mutex_enter(&uao_list_lock);
	} while (aobj);

	mutex_exit(&uao_list_lock);
	return false;
}

/*
 * uao_pagein: page in any pages from aobj in the given range.
 *
 * => aobj must be locked and is returned locked.
 * => returns true if pagein was aborted due to lack of memory.
 */
static bool
uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
{
	bool rv;

	if (UAO_USES_SWHASH(aobj)) {
		struct uao_swhash_elt *elt;
		int buck;

restart:
		for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
			for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
			     elt != NULL;
			     elt = LIST_NEXT(elt, list)) {
				int i;

				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
					int slot = elt->slots[i];

					/*
					 * if the slot isn't in range, skip it.
					 */

					if (slot < startslot ||
					    slot >= endslot) {
						continue;
					}

					/*
					 * process the page,
					 * then start over on this object
					 * since the swhash elt
					 * may have been freed.
					 */

					rv = uao_pagein_page(aobj,
					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
					if (rv) {
						return rv;
					}
					goto restart;
				}
			}
		}
	} else {
		int i;

		for (i = 0; i < aobj->u_pages; i++) {
			int slot = aobj->u_swslots[i];

			/*
			 * if the slot isn't in range, skip it
			 */

			if (slot < startslot || slot >= endslot) {
				continue;
			}

			/*
			 * process the page.
			 */

			rv = uao_pagein_page(aobj, i);
			if (rv) {
				return rv;
			}
		}
	}

	return false;
}

/*
 * uao_pagein_page: page in a single page from an anonymous UVM object.
 *
 * => Returns true if pagein was aborted due to lack of memory.
 * => Object must be locked and is returned locked.
 */

static bool
uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
{
	struct uvm_object *uobj = &aobj->u_obj;
	struct vm_page *pg;
	int rv, npages;

	pg = NULL;
	npages = 1;

	KASSERT(mutex_owned(uobj->vmobjlock));
	rv = uao_get(uobj, (voff_t)pageidx << PAGE_SHIFT, &pg, &npages,
	    0, VM_PROT_READ | VM_PROT_WRITE, 0, PGO_SYNCIO);

	/*
	 * relock and finish up.
	 */

	mutex_enter(uobj->vmobjlock);
	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return false;

	default:
		return true;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	uao_dropswap(&aobj->u_obj, pageidx);

	/*
	 * make sure it's on a page queue.
	 */
	uvm_pagelock(pg);
	uvm_pageenqueue(pg);
	uvm_pageunlock(pg);

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
	uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
	UVM_PAGE_OWN(pg, NULL);

	return false;
}

/*
 * uao_dropswap_range: drop swapslots in the range.
 *
 * => aobj must be locked and is returned locked.
 * => start is inclusive.  end is exclusive.
 */
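
/*
 * For example, uao_dropswap_range(uobj, 0, 0) frees every swap slot
 * the object holds (end == 0 is treated as "no limit" below); that is
 * exactly how uao_free() uses this function.
 */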

void
uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	int swpgonlydelta = 0;

	KASSERT(mutex_owned(uobj->vmobjlock));

	if (end == 0) {
		end = INT64_MAX;
	}

	if (UAO_USES_SWHASH(aobj)) {
		int i, hashbuckets = aobj->u_swhashmask + 1;
		voff_t taghi;
		voff_t taglo;

		taglo = UAO_SWHASH_ELT_TAG(start);
		taghi = UAO_SWHASH_ELT_TAG(end);

		for (i = 0; i < hashbuckets; i++) {
			struct uao_swhash_elt *elt, *next;

			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
			     elt != NULL;
			     elt = next) {
				int startidx, endidx;
				int j;

				next = LIST_NEXT(elt, list);

				if (elt->tag < taglo || taghi < elt->tag) {
					continue;
				}

				if (elt->tag == taglo) {
					startidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
				} else {
					startidx = 0;
				}

				if (elt->tag == taghi) {
					endidx =
					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
				} else {
					endidx = UAO_SWHASH_CLUSTER_SIZE;
				}

				for (j = startidx; j < endidx; j++) {
					int slot = elt->slots[j];

					KASSERT(uvm_pagelookup(&aobj->u_obj,
					    (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
					    + j) << PAGE_SHIFT) == NULL);
					if (slot > 0) {
						uvm_swap_free(slot, 1);
						swpgonlydelta++;
						KASSERT(elt->count > 0);
						elt->slots[j] = 0;
						elt->count--;
					}
				}

				if (elt->count == 0) {
					LIST_REMOVE(elt, list);
					pool_put(&uao_swhash_elt_pool, elt);
				}
			}
		}
	} else {
		int i;

		if (aobj->u_pages < end) {
			end = aobj->u_pages;
		}
		for (i = start; i < end; i++) {
			int slot = aobj->u_swslots[i];

			if (slot > 0) {
				uvm_swap_free(slot, 1);
				swpgonlydelta++;
			}
		}
	}

	/*
	 * adjust the counter of pages only in swap for all
	 * the swap slots we've freed.
	 */

	if (swpgonlydelta > 0) {
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
	}
}

#endif /* defined(VMSWAP) */