      1  1.151.2.1   thorpej /*	$NetBSD: uvm_aobj.c,v 1.151.2.1 2020/12/14 14:38:17 thorpej Exp $	*/
      2        1.6       mrg 
      3        1.7       chs /*
      4        1.7       chs  * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
      5        1.7       chs  *                    Washington University.
      6        1.7       chs  * All rights reserved.
      7        1.7       chs  *
      8        1.7       chs  * Redistribution and use in source and binary forms, with or without
      9        1.7       chs  * modification, are permitted provided that the following conditions
     10        1.7       chs  * are met:
     11        1.7       chs  * 1. Redistributions of source code must retain the above copyright
     12        1.7       chs  *    notice, this list of conditions and the following disclaimer.
     13        1.7       chs  * 2. Redistributions in binary form must reproduce the above copyright
     14        1.7       chs  *    notice, this list of conditions and the following disclaimer in the
     15        1.7       chs  *    documentation and/or other materials provided with the distribution.
     16        1.7       chs  *
     17        1.7       chs  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     18        1.7       chs  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     19        1.7       chs  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     20        1.7       chs  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     21        1.7       chs  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     22        1.7       chs  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23        1.7       chs  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24        1.7       chs  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25        1.7       chs  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     26        1.7       chs  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27        1.7       chs  *
     28        1.4       mrg  * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
     29        1.4       mrg  */
     30      1.113     rmind 
     31        1.7       chs /*
     32        1.7       chs  * uvm_aobj.c: anonymous memory uvm_object pager
     33        1.7       chs  *
     34        1.7       chs  * author: Chuck Silvers <chuq (at) chuq.com>
     35        1.7       chs  * started: Jan-1998
     36        1.7       chs  *
     37        1.7       chs  * - design mostly from Chuck Cranor
     38        1.7       chs  */
     39       1.49     lukem 
     40       1.49     lukem #include <sys/cdefs.h>
     41  1.151.2.1   thorpej __KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.151.2.1 2020/12/14 14:38:17 thorpej Exp $");
     42        1.7       chs 
     43      1.123     pooka #ifdef _KERNEL_OPT
     44        1.7       chs #include "opt_uvmhist.h"
     45      1.123     pooka #endif
     46        1.1       mrg 
     47        1.1       mrg #include <sys/param.h>
     48        1.1       mrg #include <sys/systm.h>
     49       1.37       chs #include <sys/kernel.h>
     50      1.104     rmind #include <sys/kmem.h>
     51       1.12   thorpej #include <sys/pool.h>
     52      1.119      matt #include <sys/atomic.h>
     53        1.1       mrg 
     54        1.1       mrg #include <uvm/uvm.h>
     55      1.132        ad #include <uvm/uvm_page_array.h>
     56        1.1       mrg 
     57        1.1       mrg /*
      58      1.117     rmind  * An anonymous UVM object (aobj) manages anonymous memory.  In addition to
     59      1.117     rmind  * keeping the list of resident pages, it may also keep a list of allocated
     60      1.117     rmind  * swap blocks.  Depending on the size of the object, this list is either
     61      1.117     rmind  * stored in an array (small objects) or in a hash table (large objects).
     62      1.117     rmind  *
     63      1.117     rmind  * Lock order
     64      1.117     rmind  *
     65      1.118     rmind  *	uao_list_lock ->
     66      1.118     rmind  *		uvm_object::vmobjlock
     67        1.1       mrg  */
     68        1.1       mrg 
     69        1.1       mrg /*
     70      1.117     rmind  * Note: for hash tables, we break the address space of the aobj into blocks
     71      1.117     rmind  * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
     72        1.1       mrg  */
     73        1.1       mrg 
     74      1.117     rmind #define	UAO_SWHASH_CLUSTER_SHIFT	4
     75      1.117     rmind #define	UAO_SWHASH_CLUSTER_SIZE		(1 << UAO_SWHASH_CLUSTER_SHIFT)
     76        1.1       mrg 
     77      1.117     rmind /* Get the "tag" for this page index. */
     78      1.117     rmind #define	UAO_SWHASH_ELT_TAG(idx)		((idx) >> UAO_SWHASH_CLUSTER_SHIFT)
     79      1.117     rmind #define UAO_SWHASH_ELT_PAGESLOT_IDX(idx) \
     80      1.117     rmind     ((idx) & (UAO_SWHASH_CLUSTER_SIZE - 1))
     81        1.1       mrg 
     82      1.117     rmind /* Given an ELT and a page index, find the swap slot. */
     83      1.117     rmind #define	UAO_SWHASH_ELT_PAGESLOT(elt, idx) \
     84      1.117     rmind     ((elt)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(idx)])
     85       1.75      yamt 
     86      1.117     rmind /* Given an ELT, return its pageidx base. */
     87      1.117     rmind #define	UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
      88      1.117     rmind     ((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)
     89        1.1       mrg 
     90      1.117     rmind /* The hash function. */
     91      1.117     rmind #define	UAO_SWHASH_HASH(aobj, idx) \
     92      1.117     rmind     (&(aobj)->u_swhash[(((idx) >> UAO_SWHASH_CLUSTER_SHIFT) \
     93      1.117     rmind     & (aobj)->u_swhashmask)])
     94        1.1       mrg 
     95        1.1       mrg /*
     96      1.117     rmind  * The threshold which determines whether we will use an array or a
     97        1.1       mrg  * hash table to store the list of allocated swap blocks.
     98        1.1       mrg  */
     99      1.117     rmind #define	UAO_SWHASH_THRESHOLD		(UAO_SWHASH_CLUSTER_SIZE * 4)
    100      1.117     rmind #define	UAO_USES_SWHASH(aobj) \
    101      1.117     rmind     ((aobj)->u_pages > UAO_SWHASH_THRESHOLD)
    102      1.117     rmind 
    103      1.117     rmind /* The number of buckets in a hash, with an upper bound. */
    104      1.117     rmind #define	UAO_SWHASH_MAXBUCKETS		256
    105      1.117     rmind #define	UAO_SWHASH_BUCKETS(aobj) \
    106      1.117     rmind     (MIN((aobj)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
    107        1.1       mrg 
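/*
 * A worked example of the clustering arithmetic above (derived directly
 * from the macros; with UAO_SWHASH_CLUSTER_SHIFT == 4, clusters are 16
 * pages):
 *
 *	pageidx 0x123:	tag		= 0x123 >> 4	= 0x12
 *			slot-in-cluster	= 0x123 & 0xf	= 0x3
 *			bucket		= &u_swhash[0x12 & u_swhashmask]
 *
 * UAO_SWHASH_THRESHOLD is 16 * 4 = 64, so an aobj of 64 pages or fewer
 * keeps a flat u_swslots[] array (one int per page), while a larger
 * object uses the hash with MIN(u_pages / 16, 256) buckets.
 */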
    108        1.1       mrg /*
    109        1.1       mrg  * uao_swhash_elt: when a hash table is being used, this structure defines
    110        1.1       mrg  * the format of an entry in the bucket list.
    111        1.1       mrg  */
    112        1.1       mrg 
    113        1.1       mrg struct uao_swhash_elt {
    114        1.5       mrg 	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
    115       1.28    kleink 	voff_t tag;				/* our 'tag' */
    116        1.5       mrg 	int count;				/* our number of active slots */
    117        1.5       mrg 	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
    118        1.1       mrg };
    119        1.1       mrg 
    120        1.1       mrg /*
    121        1.1       mrg  * uao_swhash: the swap hash table structure
    122        1.1       mrg  */
    123        1.1       mrg 
    124        1.1       mrg LIST_HEAD(uao_swhash, uao_swhash_elt);
    125        1.1       mrg 
    126       1.12   thorpej /*
    127      1.113     rmind  * uao_swhash_elt_pool: pool of uao_swhash_elt structures.
    128      1.113     rmind  * Note: pages for this pool must not come from a pageable kernel map.
    129       1.12   thorpej  */
    130      1.117     rmind static struct pool	uao_swhash_elt_pool	__cacheline_aligned;
    131        1.1       mrg 
    132        1.1       mrg /*
    133        1.1       mrg  * uvm_aobj: the actual anon-backed uvm_object
    134        1.1       mrg  *
     135        1.1       mrg  * => the uvm_object is at the top of the structure, which allows
    136       1.46       chs  *   (struct uvm_aobj *) == (struct uvm_object *)
    137        1.1       mrg  * => only one of u_swslots and u_swhash is used in any given aobj
    138        1.1       mrg  */
    139        1.1       mrg 
    140        1.1       mrg struct uvm_aobj {
    141      1.132        ad 	struct uvm_object u_obj; /* has: lock, pgops, #pages, #refs */
    142       1.79    cherry 	pgoff_t u_pages;	 /* number of pages in entire object */
    143        1.5       mrg 	int u_flags;		 /* the flags (see uvm_aobj.h) */
    144        1.5       mrg 	int *u_swslots;		 /* array of offset->swapslot mappings */
    145        1.5       mrg 				 /*
    146        1.5       mrg 				  * hashtable of offset->swapslot mappings
    147        1.5       mrg 				  * (u_swhash is an array of bucket heads)
    148        1.5       mrg 				  */
    149        1.5       mrg 	struct uao_swhash *u_swhash;
    150        1.5       mrg 	u_long u_swhashmask;		/* mask for hashtable */
    151        1.5       mrg 	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
    152      1.121  riastrad 	int u_freelist;		  /* freelist to allocate pages from */
    153        1.1       mrg };
    154        1.1       mrg 
    155       1.62  junyoung static void	uao_free(struct uvm_aobj *);
    156       1.62  junyoung static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
    157       1.62  junyoung 		    int *, int, vm_prot_t, int, int);
    158       1.86      matt static int	uao_put(struct uvm_object *, voff_t, voff_t, int);
    159       1.72      yamt 
    160       1.72      yamt #if defined(VMSWAP)
    161       1.72      yamt static struct uao_swhash_elt *uao_find_swhash_elt
    162       1.85   thorpej     (struct uvm_aobj *, int, bool);
    163       1.72      yamt 
    164       1.85   thorpej static bool uao_pagein(struct uvm_aobj *, int, int);
    165       1.85   thorpej static bool uao_pagein_page(struct uvm_aobj *, int);
    166       1.72      yamt #endif /* defined(VMSWAP) */
    167        1.1       mrg 
    168      1.121  riastrad static struct vm_page	*uao_pagealloc(struct uvm_object *, voff_t, int);
    169      1.121  riastrad 
    170        1.1       mrg /*
    171        1.1       mrg  * aobj_pager
    172       1.41       chs  *
    173        1.1       mrg  * note that some functions (e.g. put) are handled elsewhere
    174        1.1       mrg  */
    175        1.1       mrg 
    176       1.95      yamt const struct uvm_pagerops aobj_pager = {
    177       1.94      yamt 	.pgo_reference = uao_reference,
    178       1.94      yamt 	.pgo_detach = uao_detach,
    179       1.94      yamt 	.pgo_get = uao_get,
    180       1.94      yamt 	.pgo_put = uao_put,
    181        1.1       mrg };
    182        1.1       mrg 
    183        1.1       mrg /*
    184        1.1       mrg  * uao_list: global list of active aobjs, locked by uao_list_lock
    185        1.1       mrg  */
    186        1.1       mrg 
    187      1.117     rmind static LIST_HEAD(aobjlist, uvm_aobj) uao_list	__cacheline_aligned;
    188      1.117     rmind static kmutex_t		uao_list_lock		__cacheline_aligned;
    189        1.1       mrg 
    190        1.1       mrg /*
    191        1.1       mrg  * hash table/array related functions
    192        1.1       mrg  */
    193        1.1       mrg 
    194       1.72      yamt #if defined(VMSWAP)
    195       1.72      yamt 
    196        1.1       mrg /*
    197        1.1       mrg  * uao_find_swhash_elt: find (or create) a hash table entry for a page
    198        1.1       mrg  * offset.
    199        1.1       mrg  *
    200        1.1       mrg  * => the object should be locked by the caller
    201        1.1       mrg  */
    202        1.1       mrg 
    203        1.5       mrg static struct uao_swhash_elt *
    204       1.85   thorpej uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
    205        1.5       mrg {
    206        1.5       mrg 	struct uao_swhash *swhash;
    207        1.5       mrg 	struct uao_swhash_elt *elt;
    208       1.28    kleink 	voff_t page_tag;
    209        1.1       mrg 
    210       1.45       chs 	swhash = UAO_SWHASH_HASH(aobj, pageidx);
    211       1.45       chs 	page_tag = UAO_SWHASH_ELT_TAG(pageidx);
    212        1.1       mrg 
    213        1.5       mrg 	/*
    214        1.5       mrg 	 * now search the bucket for the requested tag
    215        1.5       mrg 	 */
    216       1.45       chs 
    217       1.37       chs 	LIST_FOREACH(elt, swhash, list) {
    218       1.45       chs 		if (elt->tag == page_tag) {
    219       1.45       chs 			return elt;
    220       1.45       chs 		}
    221        1.5       mrg 	}
    222       1.45       chs 	if (!create) {
    223        1.5       mrg 		return NULL;
    224       1.45       chs 	}
    225        1.5       mrg 
    226        1.5       mrg 	/*
    227       1.12   thorpej 	 * allocate a new entry for the bucket and init/insert it in
    228        1.5       mrg 	 */
    229       1.45       chs 
    230       1.45       chs 	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
    231       1.45       chs 	if (elt == NULL) {
    232       1.45       chs 		return NULL;
    233       1.45       chs 	}
    234        1.5       mrg 	LIST_INSERT_HEAD(swhash, elt, list);
    235        1.5       mrg 	elt->tag = page_tag;
    236        1.5       mrg 	elt->count = 0;
    237        1.9     perry 	memset(elt->slots, 0, sizeof(elt->slots));
    238       1.45       chs 	return elt;
    239        1.1       mrg }
    240        1.1       mrg 
    241        1.1       mrg /*
    242        1.1       mrg  * uao_find_swslot: find the swap slot number for an aobj/pageidx
    243        1.1       mrg  *
    244       1.41       chs  * => object must be locked by caller
    245        1.1       mrg  */
    246       1.46       chs 
    247       1.46       chs int
    248       1.67   thorpej uao_find_swslot(struct uvm_object *uobj, int pageidx)
    249        1.1       mrg {
    250       1.46       chs 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    251       1.46       chs 	struct uao_swhash_elt *elt;
    252        1.1       mrg 
    253      1.141        ad 	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
    254      1.141        ad 
    255        1.5       mrg 	/*
    256        1.5       mrg 	 * if noswap flag is set, then we never return a slot
    257        1.5       mrg 	 */
    258        1.1       mrg 
    259        1.5       mrg 	if (aobj->u_flags & UAO_FLAG_NOSWAP)
    260      1.117     rmind 		return 0;
    261        1.1       mrg 
    262        1.5       mrg 	/*
    263        1.5       mrg 	 * if hashing, look in hash table.
    264        1.5       mrg 	 */
    265        1.1       mrg 
    266        1.5       mrg 	if (UAO_USES_SWHASH(aobj)) {
    267       1.87   thorpej 		elt = uao_find_swhash_elt(aobj, pageidx, false);
    268      1.117     rmind 		return elt ? UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) : 0;
    269        1.5       mrg 	}
    270        1.1       mrg 
    271       1.41       chs 	/*
    272        1.5       mrg 	 * otherwise, look in the array
    273        1.5       mrg 	 */
    274       1.46       chs 
    275      1.117     rmind 	return aobj->u_swslots[pageidx];
    276        1.1       mrg }
    277        1.1       mrg 
    278        1.1       mrg /*
    279        1.1       mrg  * uao_set_swslot: set the swap slot for a page in an aobj.
    280        1.1       mrg  *
    281        1.1       mrg  * => setting a slot to zero frees the slot
    282        1.1       mrg  * => object must be locked by caller
    283       1.45       chs  * => we return the old slot number, or -1 if we failed to allocate
    284       1.45       chs  *    memory to record the new slot number
    285        1.1       mrg  */
    286       1.46       chs 
    287        1.5       mrg int
    288       1.67   thorpej uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
    289        1.5       mrg {
    290        1.5       mrg 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    291       1.45       chs 	struct uao_swhash_elt *elt;
    292        1.5       mrg 	int oldslot;
    293      1.149     skrll 	UVMHIST_FUNC(__func__);
    294      1.149     skrll 	UVMHIST_CALLARGS(pdhist, "aobj %#jx pageidx %jd slot %jd",
    295      1.126  pgoyette 	    (uintptr_t)aobj, pageidx, slot, 0);
    296        1.1       mrg 
    297      1.135        ad 	KASSERT(rw_write_held(uobj->vmobjlock) || uobj->uo_refs == 0);
    298      1.141        ad 	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
    299      1.109     rmind 
    300        1.5       mrg 	/*
    301       1.46       chs 	 * if noswap flag is set, then we can't set a non-zero slot.
    302        1.5       mrg 	 */
    303        1.1       mrg 
    304        1.5       mrg 	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
    305      1.117     rmind 		KASSERTMSG(slot == 0, "uao_set_swslot: no swap object");
    306      1.117     rmind 		return 0;
    307        1.5       mrg 	}
    308        1.1       mrg 
    309        1.5       mrg 	/*
    310        1.5       mrg 	 * are we using a hash table?  if so, add it in the hash.
    311        1.5       mrg 	 */
    312        1.1       mrg 
    313        1.5       mrg 	if (UAO_USES_SWHASH(aobj)) {
    314       1.39       chs 
    315       1.12   thorpej 		/*
    316       1.12   thorpej 		 * Avoid allocating an entry just to free it again if
      317       1.12   thorpej 		 * the page had no swap slot in the first place, and
    318       1.12   thorpej 		 * we are freeing.
    319       1.12   thorpej 		 */
    320       1.39       chs 
    321       1.46       chs 		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
    322       1.12   thorpej 		if (elt == NULL) {
    323       1.45       chs 			return slot ? -1 : 0;
    324       1.12   thorpej 		}
    325        1.5       mrg 
    326        1.5       mrg 		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
    327        1.5       mrg 		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;
    328        1.5       mrg 
    329        1.5       mrg 		/*
    330        1.5       mrg 		 * now adjust the elt's reference counter and free it if we've
    331        1.5       mrg 		 * dropped it to zero.
    332        1.5       mrg 		 */
    333        1.5       mrg 
    334        1.5       mrg 		if (slot) {
    335        1.5       mrg 			if (oldslot == 0)
    336        1.5       mrg 				elt->count++;
    337       1.45       chs 		} else {
    338       1.45       chs 			if (oldslot)
    339        1.5       mrg 				elt->count--;
    340        1.5       mrg 
    341        1.5       mrg 			if (elt->count == 0) {
    342        1.5       mrg 				LIST_REMOVE(elt, list);
    343       1.12   thorpej 				pool_put(&uao_swhash_elt_pool, elt);
    344        1.5       mrg 			}
    345        1.5       mrg 		}
    346       1.41       chs 	} else {
    347        1.5       mrg 		/* we are using an array */
    348        1.5       mrg 		oldslot = aobj->u_swslots[pageidx];
    349        1.5       mrg 		aobj->u_swslots[pageidx] = slot;
    350        1.5       mrg 	}
    351      1.117     rmind 	return oldslot;
    352        1.1       mrg }
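/*
 * As a usage sketch (it mirrors uao_dropswap() further below): releasing
 * whatever swap backs page index "pageidx" of a locked aobj is simply
 *
 *	slot = uao_set_swslot(uobj, pageidx, 0);
 *	if (slot)
 *		uvm_swap_free(slot, 1);
 *
 * The -1 (allocation failure) return can only occur when a non-zero slot
 * is being recorded, since clearing a slot never allocates a new hash
 * entry, so the freeing path above need not check for it.
 */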
    353        1.1       mrg 
    354       1.72      yamt #endif /* defined(VMSWAP) */
    355       1.72      yamt 
    356        1.1       mrg /*
    357        1.1       mrg  * end of hash/array functions
    358        1.1       mrg  */
    359        1.1       mrg 
    360        1.1       mrg /*
    361        1.1       mrg  * uao_free: free all resources held by an aobj, and then free the aobj
    362        1.1       mrg  *
    363        1.1       mrg  * => the aobj should be dead
    364        1.1       mrg  */
    365       1.46       chs 
    366        1.1       mrg static void
    367       1.67   thorpej uao_free(struct uvm_aobj *aobj)
    368        1.1       mrg {
    369      1.117     rmind 	struct uvm_object *uobj = &aobj->u_obj;
    370       1.96        ad 
    371      1.141        ad 	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
    372      1.135        ad 	KASSERT(rw_write_held(uobj->vmobjlock));
    373      1.118     rmind 	uao_dropswap_range(uobj, 0, 0);
    374      1.135        ad 	rw_exit(uobj->vmobjlock);
    375       1.72      yamt 
    376       1.72      yamt #if defined(VMSWAP)
    377        1.5       mrg 	if (UAO_USES_SWHASH(aobj)) {
    378        1.1       mrg 
    379        1.5       mrg 		/*
    380       1.75      yamt 		 * free the hash table itself.
    381        1.5       mrg 		 */
    382       1.46       chs 
    383      1.104     rmind 		hashdone(aobj->u_swhash, HASH_LIST, aobj->u_swhashmask);
    384        1.5       mrg 	} else {
    385        1.5       mrg 
    386        1.5       mrg 		/*
     387       1.75      yamt 		 * free the array itself.
    388        1.5       mrg 		 */
    389        1.5       mrg 
    390      1.104     rmind 		kmem_free(aobj->u_swslots, aobj->u_pages * sizeof(int));
    391        1.1       mrg 	}
    392       1.72      yamt #endif /* defined(VMSWAP) */
    393       1.72      yamt 
    394        1.5       mrg 	/*
    395        1.5       mrg 	 * finally free the aobj itself
    396        1.5       mrg 	 */
    397       1.46       chs 
    398      1.117     rmind 	uvm_obj_destroy(uobj, true);
    399      1.113     rmind 	kmem_free(aobj, sizeof(struct uvm_aobj));
    400        1.1       mrg }
    401        1.1       mrg 
    402        1.1       mrg /*
    403        1.1       mrg  * pager functions
    404        1.1       mrg  */
    405        1.1       mrg 
    406        1.1       mrg /*
    407        1.1       mrg  * uao_create: create an aobj of the given size and return its uvm_object.
    408        1.1       mrg  *
    409        1.1       mrg  * => for normal use, flags are always zero
    410        1.1       mrg  * => for the kernel object, the flags are:
    411        1.1       mrg  *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
    412        1.1       mrg  *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
    413        1.1       mrg  */
    414       1.46       chs 
    415        1.5       mrg struct uvm_object *
    416      1.127       chs uao_create(voff_t size, int flags)
    417        1.5       mrg {
    418       1.46       chs 	static struct uvm_aobj kernel_object_store;
    419  1.151.2.1   thorpej 	static krwlock_t bootstrap_kernel_object_lock;
    420      1.120    martin 	static int kobj_alloced __diagused = 0;
    421      1.127       chs 	pgoff_t pages = round_page((uint64_t)size) >> PAGE_SHIFT;
    422        1.5       mrg 	struct uvm_aobj *aobj;
    423       1.66      yamt 	int refs;
    424        1.1       mrg 
    425        1.5       mrg 	/*
    426      1.114     rmind 	 * Allocate a new aobj, unless kernel object is requested.
    427       1.27       chs 	 */
    428        1.5       mrg 
    429       1.46       chs 	if (flags & UAO_FLAG_KERNOBJ) {
    430       1.46       chs 		KASSERT(!kobj_alloced);
    431        1.5       mrg 		aobj = &kernel_object_store;
    432        1.5       mrg 		aobj->u_pages = pages;
    433       1.46       chs 		aobj->u_flags = UAO_FLAG_NOSWAP;
    434       1.66      yamt 		refs = UVM_OBJ_KERN;
    435        1.5       mrg 		kobj_alloced = UAO_FLAG_KERNOBJ;
    436        1.5       mrg 	} else if (flags & UAO_FLAG_KERNSWAP) {
    437       1.46       chs 		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
    438        1.5       mrg 		aobj = &kernel_object_store;
    439        1.5       mrg 		kobj_alloced = UAO_FLAG_KERNSWAP;
    440       1.66      yamt 		refs = 0xdeadbeaf; /* XXX: gcc */
    441       1.46       chs 	} else {
    442      1.113     rmind 		aobj = kmem_alloc(sizeof(struct uvm_aobj), KM_SLEEP);
    443        1.5       mrg 		aobj->u_pages = pages;
    444       1.46       chs 		aobj->u_flags = 0;
    445       1.66      yamt 		refs = 1;
    446        1.5       mrg 	}
    447        1.1       mrg 
    448        1.5       mrg 	/*
    449      1.121  riastrad 	 * no freelist by default
    450      1.121  riastrad 	 */
    451      1.121  riastrad 
    452      1.121  riastrad 	aobj->u_freelist = VM_NFREELIST;
    453      1.121  riastrad 
    454      1.121  riastrad 	/*
    455        1.5       mrg  	 * allocate hash/array if necessary
    456        1.5       mrg  	 *
     457        1.5       mrg  	 * note: in the KERNSWAP case there is no need to worry about locking
     458        1.5       mrg  	 * since we are still booting and should be the only thread around.
    459        1.5       mrg  	 */
    460       1.46       chs 
    461  1.151.2.1   thorpej 	const int kernswap = (flags & UAO_FLAG_KERNSWAP) != 0;
    462  1.151.2.1   thorpej 	if (flags == 0 || kernswap) {
    463       1.72      yamt #if defined(VMSWAP)
    464        1.5       mrg 
    465        1.5       mrg 		/* allocate hash table or array depending on object size */
    466       1.27       chs 		if (UAO_USES_SWHASH(aobj)) {
    467      1.104     rmind 			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
    468  1.151.2.1   thorpej 			    HASH_LIST, true, &aobj->u_swhashmask);
    469        1.5       mrg 		} else {
    470      1.104     rmind 			aobj->u_swslots = kmem_zalloc(pages * sizeof(int),
    471  1.151.2.1   thorpej 			    KM_SLEEP);
    472        1.5       mrg 		}
    473       1.72      yamt #endif /* defined(VMSWAP) */
    474        1.5       mrg 
    475  1.151.2.1   thorpej 		/*
    476  1.151.2.1   thorpej 		 * Replace kernel_object's temporary static lock with
    477  1.151.2.1   thorpej 		 * a regular rw_obj.  We cannot use uvm_obj_setlock()
    478  1.151.2.1   thorpej 		 * because that would try to free the old lock.
    479  1.151.2.1   thorpej 		 */
    480  1.151.2.1   thorpej 
    481  1.151.2.1   thorpej 		if (kernswap) {
    482  1.151.2.1   thorpej 			aobj->u_obj.vmobjlock = rw_obj_alloc();
    483  1.151.2.1   thorpej 			rw_destroy(&bootstrap_kernel_object_lock);
    484  1.151.2.1   thorpej 		}
    485        1.5       mrg 		if (flags) {
    486        1.5       mrg 			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
    487      1.117     rmind 			return &aobj->u_obj;
    488        1.5       mrg 		}
    489        1.5       mrg 	}
    490        1.5       mrg 
    491        1.5       mrg 	/*
    492      1.115     rmind 	 * Initialise UVM object.
    493      1.115     rmind 	 */
    494       1.46       chs 
    495      1.115     rmind 	const bool kernobj = (flags & UAO_FLAG_KERNOBJ) != 0;
    496      1.115     rmind 	uvm_obj_init(&aobj->u_obj, &aobj_pager, !kernobj, refs);
    497      1.115     rmind 	if (__predict_false(kernobj)) {
    498  1.151.2.1   thorpej 		/* Use a temporary static lock for kernel_object. */
    499  1.151.2.1   thorpej 		rw_init(&bootstrap_kernel_object_lock);
    500  1.151.2.1   thorpej 		uvm_obj_setlock(&aobj->u_obj, &bootstrap_kernel_object_lock);
    501      1.115     rmind 	}
    502        1.1       mrg 
    503        1.5       mrg 	/*
    504        1.5       mrg  	 * now that aobj is ready, add it to the global list
    505        1.5       mrg  	 */
    506       1.46       chs 
    507       1.90        ad 	mutex_enter(&uao_list_lock);
    508        1.5       mrg 	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
    509       1.90        ad 	mutex_exit(&uao_list_lock);
    510        1.5       mrg 	return(&aobj->u_obj);
    511        1.1       mrg }
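/*
 * A minimal life-cycle sketch for an ordinary (non-kernel) aobj, using
 * only the functions defined in this file:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(size, 0);	returned holding one reference
 *	uao_reference(uobj);		another user takes a reference
 *	...
 *	uao_detach(uobj);		drop the extra reference
 *	uao_detach(uobj);		last reference: pages and swap are
 *					released and the aobj is freed
 */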
    512        1.1       mrg 
    513        1.1       mrg /*
    514      1.121  riastrad  * uao_set_pgfl: allocate pages only from the specified freelist.
    515      1.121  riastrad  *
    516      1.121  riastrad  * => must be called before any pages are allocated for the object.
    517      1.122  riastrad  * => reset by setting it to VM_NFREELIST, meaning any freelist.
    518      1.121  riastrad  */
    519      1.121  riastrad 
    520      1.121  riastrad void
    521      1.121  riastrad uao_set_pgfl(struct uvm_object *uobj, int freelist)
    522      1.121  riastrad {
    523      1.121  riastrad 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    524      1.121  riastrad 
    525      1.121  riastrad 	KASSERTMSG((0 <= freelist), "invalid freelist %d", freelist);
    526      1.122  riastrad 	KASSERTMSG((freelist <= VM_NFREELIST), "invalid freelist %d",
    527      1.122  riastrad 	    freelist);
    528      1.121  riastrad 
    529      1.121  riastrad 	aobj->u_freelist = freelist;
    530      1.121  riastrad }
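/*
 * For example, a caller that must take its pages from one particular
 * physical-memory freelist (the freelist number "fl" below is machine
 * dependent and only illustrative) would set it right after creation,
 * before any page can have been allocated:
 *
 *	uobj = uao_create(size, 0);
 *	uao_set_pgfl(uobj, fl);
 */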
    531      1.121  riastrad 
    532      1.121  riastrad /*
    533      1.121  riastrad  * uao_pagealloc: allocate a page for aobj.
    534      1.121  riastrad  */
    535      1.121  riastrad 
    536      1.121  riastrad static inline struct vm_page *
    537      1.121  riastrad uao_pagealloc(struct uvm_object *uobj, voff_t offset, int flags)
    538      1.121  riastrad {
    539      1.121  riastrad 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    540      1.121  riastrad 
    541      1.121  riastrad 	if (__predict_true(aobj->u_freelist == VM_NFREELIST))
    542      1.121  riastrad 		return uvm_pagealloc(uobj, offset, NULL, flags);
    543      1.121  riastrad 	else
    544      1.121  riastrad 		return uvm_pagealloc_strat(uobj, offset, NULL, flags,
    545      1.121  riastrad 		    UVM_PGA_STRAT_ONLY, aobj->u_freelist);
    546      1.121  riastrad }
    547      1.121  riastrad 
    548      1.121  riastrad /*
    549        1.1       mrg  * uao_init: set up aobj pager subsystem
    550        1.1       mrg  *
    551        1.1       mrg  * => called at boot time from uvm_pager_init()
    552        1.1       mrg  */
    553       1.46       chs 
    554       1.27       chs void
    555       1.46       chs uao_init(void)
    556        1.5       mrg {
    557       1.12   thorpej 	static int uao_initialized;
    558       1.12   thorpej 
    559       1.12   thorpej 	if (uao_initialized)
    560       1.12   thorpej 		return;
    561       1.87   thorpej 	uao_initialized = true;
    562        1.5       mrg 	LIST_INIT(&uao_list);
    563       1.96        ad 	mutex_init(&uao_list_lock, MUTEX_DEFAULT, IPL_NONE);
    564      1.107     pooka 	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
    565      1.107     pooka 	    0, 0, 0, "uaoeltpl", NULL, IPL_VM);
    566        1.1       mrg }
    567        1.1       mrg 
    568        1.1       mrg /*
    569      1.118     rmind  * uao_reference: hold a reference to an anonymous UVM object.
    570        1.1       mrg  */
    571        1.5       mrg void
    572       1.67   thorpej uao_reference(struct uvm_object *uobj)
    573        1.1       mrg {
    574      1.118     rmind 	/* Kernel object is persistent. */
    575      1.118     rmind 	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
    576      1.101        ad 		return;
    577      1.118     rmind 	}
    578      1.118     rmind 	atomic_inc_uint(&uobj->uo_refs);
    579        1.1       mrg }
    580        1.1       mrg 
    581        1.1       mrg /*
    582      1.118     rmind  * uao_detach: drop a reference to an anonymous UVM object.
    583        1.1       mrg  */
    584        1.5       mrg void
    585       1.67   thorpej uao_detach(struct uvm_object *uobj)
    586        1.5       mrg {
    587      1.118     rmind 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    588      1.132        ad 	struct uvm_page_array a;
    589      1.118     rmind 	struct vm_page *pg;
    590      1.118     rmind 
    591      1.149     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    592      1.101        ad 
    593      1.101        ad 	/*
    594      1.118     rmind 	 * Detaching from kernel object is a NOP.
    595      1.118     rmind 	 */
    596      1.101        ad 
    597      1.101        ad 	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
    598      1.102        ad 		return;
    599      1.101        ad 
    600        1.5       mrg 	/*
    601      1.118     rmind 	 * Drop the reference.  If it was the last one, destroy the object.
    602      1.118     rmind 	 */
    603        1.5       mrg 
    604      1.125       chs 	KASSERT(uobj->uo_refs > 0);
    605      1.136       rin 	UVMHIST_LOG(maphist,"  (uobj=%#jx)  ref=%jd",
    606      1.126  pgoyette 	    (uintptr_t)uobj, uobj->uo_refs, 0, 0);
    607      1.118     rmind 	if (atomic_dec_uint_nv(&uobj->uo_refs) > 0) {
    608        1.5       mrg 		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
    609        1.5       mrg 		return;
    610        1.5       mrg 	}
    611        1.5       mrg 
    612        1.5       mrg 	/*
    613      1.118     rmind 	 * Remove the aobj from the global list.
    614      1.118     rmind 	 */
    615       1.46       chs 
    616       1.92        ad 	mutex_enter(&uao_list_lock);
    617        1.5       mrg 	LIST_REMOVE(aobj, u_list);
    618       1.92        ad 	mutex_exit(&uao_list_lock);
    619        1.5       mrg 
    620        1.5       mrg 	/*
    621      1.118     rmind 	 * Free all the pages left in the aobj.  For each page, when the
    622      1.118     rmind 	 * page is no longer busy (and thus after any disk I/O that it is
    623      1.118     rmind 	 * involved in is complete), release any swap resources and free
    624      1.118     rmind 	 * the page itself.
    625      1.118     rmind 	 */
    626      1.146        ad 	uvm_page_array_init(&a, uobj, 0);
    627      1.135        ad 	rw_enter(uobj->vmobjlock, RW_WRITER);
    628      1.146        ad 	while ((pg = uvm_page_array_fill_and_peek(&a, 0, 0)) != NULL) {
    629      1.132        ad 		uvm_page_array_advance(&a);
    630      1.130        ad 		pmap_page_protect(pg, VM_PROT_NONE);
    631        1.5       mrg 		if (pg->flags & PG_BUSY) {
    632      1.137        ad 			uvm_pagewait(pg, uobj->vmobjlock, "uao_det");
    633      1.132        ad 			uvm_page_array_clear(&a);
    634      1.135        ad 			rw_enter(uobj->vmobjlock, RW_WRITER);
    635        1.5       mrg 			continue;
    636        1.5       mrg 		}
    637       1.18       chs 		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
    638        1.5       mrg 		uvm_pagefree(pg);
    639        1.5       mrg 	}
    640      1.132        ad 	uvm_page_array_fini(&a);
    641        1.1       mrg 
    642        1.5       mrg 	/*
    643      1.118     rmind 	 * Finally, free the anonymous UVM object itself.
    644      1.118     rmind 	 */
    645        1.1       mrg 
    646        1.5       mrg 	uao_free(aobj);
    647        1.5       mrg }
    648        1.1       mrg 
    649        1.1       mrg /*
    650       1.46       chs  * uao_put: flush pages out of a uvm object
    651       1.22   thorpej  *
    652       1.22   thorpej  * => object should be locked by caller.  we may _unlock_ the object
    653       1.22   thorpej  *	if (and only if) we need to clean a page (PGO_CLEANIT).
    654       1.22   thorpej  *	XXXJRT Currently, however, we don't.  In the case of cleaning
    655       1.22   thorpej  *	XXXJRT a page, we simply just deactivate it.  Should probably
    656       1.22   thorpej  *	XXXJRT handle this better, in the future (although "flushing"
    657       1.22   thorpej  *	XXXJRT anonymous memory isn't terribly important).
    658       1.22   thorpej  * => if PGO_CLEANIT is not set, then we will neither unlock the object
     659       1.22   thorpej  *	nor block.
    660       1.22   thorpej  * => if PGO_ALLPAGE is set, then all pages in the object are valid targets
    661       1.22   thorpej  *	for flushing.
    662       1.86      matt  * => we return 0 unless we encountered some sort of I/O error
    663       1.22   thorpej  *	XXXJRT currently never happens, as we never directly initiate
    664       1.22   thorpej  *	XXXJRT I/O
    665        1.1       mrg  */
    666       1.22   thorpej 
    667       1.68   thorpej static int
    668       1.67   thorpej uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
    669        1.5       mrg {
    670       1.46       chs 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    671      1.132        ad 	struct uvm_page_array a;
    672      1.132        ad 	struct vm_page *pg;
    673       1.28    kleink 	voff_t curoff;
    674      1.149     skrll 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
    675       1.22   thorpej 
    676      1.141        ad 	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
    677      1.135        ad 	KASSERT(rw_write_held(uobj->vmobjlock));
    678       1.96        ad 
    679       1.22   thorpej 	if (flags & PGO_ALLPAGES) {
    680       1.22   thorpej 		start = 0;
    681       1.22   thorpej 		stop = aobj->u_pages << PAGE_SHIFT;
    682       1.22   thorpej 	} else {
    683       1.22   thorpej 		start = trunc_page(start);
    684       1.71      yamt 		if (stop == 0) {
    685       1.71      yamt 			stop = aobj->u_pages << PAGE_SHIFT;
    686       1.71      yamt 		} else {
    687       1.71      yamt 			stop = round_page(stop);
    688       1.71      yamt 		}
    689      1.127       chs 		if (stop > (uint64_t)(aobj->u_pages << PAGE_SHIFT)) {
    690      1.127       chs 			printf("uao_put: strange, got an out of range "
    691      1.136       rin 			    "flush %#jx > %#jx (fixed)\n",
    692      1.127       chs 			    (uintmax_t)stop,
    693      1.127       chs 			    (uintmax_t)(aobj->u_pages << PAGE_SHIFT));
    694       1.22   thorpej 			stop = aobj->u_pages << PAGE_SHIFT;
    695       1.22   thorpej 		}
    696       1.22   thorpej 	}
    697       1.22   thorpej 	UVMHIST_LOG(maphist,
    698      1.136       rin 	    " flush start=%#jx, stop=%#jx, flags=%#jx",
    699      1.132        ad 	    start, stop, flags, 0);
    700        1.1       mrg 
    701        1.5       mrg 	/*
    702       1.22   thorpej 	 * Don't need to do any work here if we're not freeing
    703       1.22   thorpej 	 * or deactivating pages.
    704       1.22   thorpej 	 */
    705       1.46       chs 
    706       1.22   thorpej 	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
    707      1.135        ad 		rw_exit(uobj->vmobjlock);
    708       1.46       chs 		return 0;
    709       1.22   thorpej 	}
    710       1.22   thorpej 
    711       1.99        ad 	/* locked: uobj */
    712      1.146        ad 	uvm_page_array_init(&a, uobj, 0);
    713      1.132        ad 	curoff = start;
    714      1.146        ad 	while ((pg = uvm_page_array_fill_and_peek(&a, curoff, 0)) != NULL) {
    715      1.132        ad 		if (pg->offset >= stop) {
    716      1.132        ad 			break;
    717       1.22   thorpej 		}
    718       1.98      yamt 
    719       1.98      yamt 		/*
    720       1.98      yamt 		 * wait and try again if the page is busy.
    721       1.98      yamt 		 */
    722       1.98      yamt 
    723       1.98      yamt 		if (pg->flags & PG_BUSY) {
    724      1.137        ad 			uvm_pagewait(pg, uobj->vmobjlock, "uao_put");
    725      1.132        ad 			uvm_page_array_clear(&a);
    726      1.135        ad 			rw_enter(uobj->vmobjlock, RW_WRITER);
    727       1.98      yamt 			continue;
    728       1.98      yamt 		}
    729      1.132        ad 		uvm_page_array_advance(&a);
    730      1.132        ad 		curoff = pg->offset + PAGE_SIZE;
    731       1.98      yamt 
    732       1.46       chs 		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
    733       1.41       chs 
    734       1.22   thorpej 		/*
    735       1.22   thorpej 		 * XXX In these first 3 cases, we always just
    736       1.22   thorpej 		 * XXX deactivate the page.  We may want to
    737       1.22   thorpej 		 * XXX handle the different cases more specifically
    738       1.22   thorpej 		 * XXX in the future.
    739       1.22   thorpej 		 */
    740       1.46       chs 
    741       1.22   thorpej 		case PGO_CLEANIT|PGO_FREE:
    742       1.22   thorpej 		case PGO_CLEANIT|PGO_DEACTIVATE:
    743       1.22   thorpej 		case PGO_DEACTIVATE:
    744       1.25   thorpej  deactivate_it:
    745      1.133        ad  			uvm_pagelock(pg);
    746      1.131        ad 			uvm_pagedeactivate(pg);
    747      1.133        ad  			uvm_pageunlock(pg);
    748       1.98      yamt 			break;
    749       1.22   thorpej 
    750       1.22   thorpej 		case PGO_FREE:
    751       1.25   thorpej 			/*
    752       1.25   thorpej 			 * If there are multiple references to
    753       1.25   thorpej 			 * the object, just deactivate the page.
    754       1.25   thorpej 			 */
    755       1.46       chs 
    756       1.25   thorpej 			if (uobj->uo_refs > 1)
    757       1.25   thorpej 				goto deactivate_it;
    758       1.25   thorpej 
    759       1.22   thorpej 			/*
    760       1.98      yamt 			 * free the swap slot and the page.
    761       1.22   thorpej 			 */
    762       1.46       chs 
    763       1.46       chs 			pmap_page_protect(pg, VM_PROT_NONE);
    764       1.75      yamt 
    765       1.75      yamt 			/*
    766       1.75      yamt 			 * freeing swapslot here is not strictly necessary.
    767       1.75      yamt 			 * however, leaving it here doesn't save much
    768       1.75      yamt 			 * because we need to update swap accounting anyway.
    769       1.75      yamt 			 */
    770       1.75      yamt 
    771       1.46       chs 			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
    772       1.46       chs 			uvm_pagefree(pg);
    773       1.98      yamt 			break;
    774       1.98      yamt 
    775       1.98      yamt 		default:
    776       1.98      yamt 			panic("%s: impossible", __func__);
    777       1.22   thorpej 		}
    778       1.22   thorpej 	}
    779      1.135        ad 	rw_exit(uobj->vmobjlock);
    780      1.132        ad 	uvm_page_array_fini(&a);
    781       1.46       chs 	return 0;
    782        1.1       mrg }
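/*
 * A sketch of how a caller discards a page range from an aobj through
 * the pager entry point; the caller takes the object lock and uao_put()
 * releases it before returning:
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	error = (*uobj->pgops->pgo_put)(uobj, start, end, PGO_FREE);
 */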
    783        1.1       mrg 
    784        1.1       mrg /*
    785        1.1       mrg  * uao_get: fetch me a page
    786        1.1       mrg  *
    787        1.1       mrg  * we have three cases:
    788        1.1       mrg  * 1: page is resident     -> just return the page.
    789        1.1       mrg  * 2: page is zero-fill    -> allocate a new page and zero it.
    790        1.1       mrg  * 3: page is swapped out  -> fetch the page from swap.
    791        1.1       mrg  *
    792      1.142        ad  * case 1 can be handled with PGO_LOCKED, cases 2 and 3 cannot.
    793      1.142        ad  * so, if the "center" page hits case 2/3 then we will need to return EBUSY.
    794        1.1       mrg  *
    795        1.1       mrg  * => prefer map unlocked (not required)
    796        1.1       mrg  * => object must be locked!  we will _unlock_ it before starting any I/O.
    797      1.142        ad  * => flags: PGO_LOCKED: fault data structures are locked
    798        1.1       mrg  * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
    799        1.1       mrg  * => NOTE: caller must check for released pages!!
    800        1.1       mrg  */
    801       1.46       chs 
    802        1.5       mrg static int
    803       1.67   thorpej uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    804       1.82      yamt     int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
    805        1.5       mrg {
    806       1.28    kleink 	voff_t current_offset;
    807      1.147        ad 	struct vm_page *ptmp;
    808      1.147        ad 	int lcv, gotpages, maxpages, swslot, pageidx;
    809      1.144        ad 	bool overwrite = ((flags & PGO_OVERWRITE) != 0);
    810      1.147        ad 	struct uvm_page_array a;
    811        1.5       mrg 
    812      1.149     skrll 	UVMHIST_FUNC(__func__);
    813      1.149     skrll 	UVMHIST_CALLARGS(pdhist, "aobj=%#jx offset=%jd, flags=%jd",
    814      1.126  pgoyette 		    (uintptr_t)uobj, offset, flags,0);
    815       1.37       chs 
    816        1.5       mrg 	/*
    817      1.139        ad 	 * the object must be locked.  it can only be a read lock when
    818      1.141        ad 	 * processing a read fault with PGO_LOCKED.
    819      1.139        ad 	 */
    820      1.139        ad 
    821      1.141        ad 	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
    822      1.139        ad 	KASSERT(rw_lock_held(uobj->vmobjlock));
    823      1.139        ad 	KASSERT(rw_write_held(uobj->vmobjlock) ||
    824      1.141        ad 	   ((flags & PGO_LOCKED) != 0 && (access_type & VM_PROT_WRITE) == 0));
    825      1.139        ad 
    826      1.139        ad 	/*
    827        1.5       mrg  	 * get number of pages
    828        1.5       mrg  	 */
    829       1.46       chs 
    830        1.5       mrg 	maxpages = *npagesp;
    831        1.5       mrg 
    832        1.5       mrg 	/*
     833        1.5       mrg  	 * step 1: handle the case where fault data structures are locked.
    834        1.5       mrg  	 */
    835        1.1       mrg 
    836        1.5       mrg 	if (flags & PGO_LOCKED) {
    837       1.46       chs 
    838        1.5       mrg 		/*
    839        1.5       mrg  		 * step 1a: get pages that are already resident.   only do
    840        1.5       mrg 		 * this if the data structures are locked (i.e. the first
    841        1.5       mrg 		 * time through).
    842        1.5       mrg  		 */
    843        1.5       mrg 
    844      1.146        ad 		uvm_page_array_init(&a, uobj, 0);
    845        1.5       mrg 		gotpages = 0;	/* # of pages we got so far */
    846      1.141        ad 		for (lcv = 0; lcv < maxpages; lcv++) {
    847      1.146        ad 			ptmp = uvm_page_array_fill_and_peek(&a,
    848      1.146        ad 			    offset + (lcv << PAGE_SHIFT), maxpages);
    849      1.141        ad 			if (ptmp == NULL) {
    850      1.141        ad 				break;
    851      1.141        ad 			}
    852      1.141        ad 			KASSERT(ptmp->offset >= offset);
    853      1.141        ad 			lcv = (ptmp->offset - offset) >> PAGE_SHIFT;
    854      1.141        ad 			if (lcv >= maxpages) {
    855      1.141        ad 				break;
    856        1.5       mrg 			}
    857      1.141        ad 			uvm_page_array_advance(&a);
    858        1.5       mrg 
    859        1.5       mrg 			/*
    860       1.46       chs 			 * to be useful must get a non-busy page
    861        1.5       mrg 			 */
    862       1.46       chs 
    863      1.141        ad 			if ((ptmp->flags & PG_BUSY) != 0) {
    864      1.124    martin 				continue;
    865        1.5       mrg 			}
    866        1.5       mrg 
    867        1.5       mrg 			/*
    868      1.141        ad 			 * useful page: plug it in our result array
    869        1.5       mrg 			 */
    870      1.141        ad 
    871      1.134        ad 			KASSERT(uvm_pagegetdirty(ptmp) !=
    872      1.134        ad 			    UVM_PAGE_STATUS_CLEAN);
    873        1.5       mrg 			pps[lcv] = ptmp;
    874        1.5       mrg 			gotpages++;
    875       1.46       chs 		}
    876      1.141        ad 		uvm_page_array_fini(&a);
    877        1.5       mrg 
    878        1.5       mrg 		/*
     879        1.5       mrg  		 * step 1b: now we've either done everything needed or we need
    880        1.5       mrg 		 * to unlock and do some waiting or I/O.
    881        1.5       mrg  		 */
    882        1.5       mrg 
    883      1.143   hannken 		UVMHIST_LOG(pdhist, "<- done (done=%jd)",
    884      1.143   hannken 		    (pps[centeridx] != NULL), 0,0,0);
    885        1.5       mrg 		*npagesp = gotpages;
    886      1.142        ad 		return pps[centeridx] != NULL ? 0 : EBUSY;
    887        1.1       mrg 	}
    888        1.1       mrg 
    889        1.5       mrg 	/*
    890        1.5       mrg  	 * step 2: get non-resident or busy pages.
    891        1.5       mrg  	 * object is locked.   data structures are unlocked.
    892        1.5       mrg  	 */
    893        1.5       mrg 
    894       1.76      yamt 	if ((flags & PGO_SYNCIO) == 0) {
    895       1.76      yamt 		goto done;
    896       1.76      yamt 	}
    897       1.76      yamt 
    898      1.147        ad 	uvm_page_array_init(&a, uobj, 0);
    899      1.147        ad 	for (lcv = 0, current_offset = offset ; lcv < maxpages ;) {
    900       1.27       chs 
    901        1.5       mrg 		/*
    902        1.5       mrg  		 * we have yet to locate the current page (pps[lcv]).   we
    903        1.5       mrg 		 * first look for a page that is already at the current offset.
    904        1.5       mrg 		 * if we find a page, we check to see if it is busy or
    905        1.5       mrg 		 * released.  if that is the case, then we sleep on the page
    906        1.5       mrg 		 * until it is no longer busy or released and repeat the lookup.
    907        1.5       mrg 		 * if the page we found is neither busy nor released, then we
    908      1.147        ad 		 * busy it (so we own it) and plug it into pps[lcv].   we are
    909      1.147        ad 		 * ready to move on to the next page.
    910        1.5       mrg  		 */
    911        1.5       mrg 
    912      1.147        ad 		ptmp = uvm_page_array_fill_and_peek(&a, current_offset,
    913      1.147        ad 		    maxpages - lcv);
    914        1.5       mrg 
    915      1.147        ad 		if (ptmp != NULL && ptmp->offset == current_offset) {
    916        1.5       mrg 			/* page is there, see if we need to wait on it */
    917       1.46       chs 			if ((ptmp->flags & PG_BUSY) != 0) {
    918        1.5       mrg 				UVMHIST_LOG(pdhist,
    919      1.136       rin 				    "sleeping, ptmp->flags %#jx\n",
    920        1.5       mrg 				    ptmp->flags,0,0,0);
    921      1.137        ad 				uvm_pagewait(ptmp, uobj->vmobjlock, "uao_get");
    922      1.135        ad 				rw_enter(uobj->vmobjlock, RW_WRITER);
    923      1.147        ad 				uvm_page_array_clear(&a);
    924       1.46       chs 				continue;
    925        1.5       mrg 			}
    926       1.41       chs 
    927       1.41       chs 			/*
    928      1.147        ad  			 * if we get here then the page is resident and
    929      1.147        ad 			 * unbusy.  we busy it now (so we own it).  if
    930      1.147        ad 			 * overwriting, mark the page dirty up front as
    931      1.147        ad 			 * it will be zapped via an unmanaged mapping.
    932        1.5       mrg  			 */
    933       1.46       chs 
    934      1.134        ad 			KASSERT(uvm_pagegetdirty(ptmp) !=
    935      1.134        ad 			    UVM_PAGE_STATUS_CLEAN);
    936      1.145        ad 			if (overwrite) {
    937      1.145        ad 				uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_DIRTY);
    938      1.145        ad 			}
    939        1.5       mrg 			/* we own it, caller must un-busy */
    940        1.5       mrg 			ptmp->flags |= PG_BUSY;
    941        1.5       mrg 			UVM_PAGE_OWN(ptmp, "uao_get2");
    942      1.147        ad 			pps[lcv++] = ptmp;
    943      1.147        ad 			current_offset += PAGE_SIZE;
    944      1.147        ad 			uvm_page_array_advance(&a);
    945      1.147        ad 			continue;
    946      1.147        ad 		} else {
    947      1.147        ad 			KASSERT(ptmp == NULL || ptmp->offset > current_offset);
    948        1.5       mrg 		}
    949        1.5       mrg 
    950        1.5       mrg 		/*
    951      1.147        ad 		 * not resident.  allocate a new busy/fake/clean page in the
    952      1.147        ad 		 * object.  if it's in swap we need to do I/O to fill in the
    953      1.147        ad 		 * data, otherwise the page needs to be cleared: if it's not
    954      1.147        ad 		 * destined to be overwritten, then zero it here and now.
    955      1.147        ad 		 */
    956       1.46       chs 
    957      1.147        ad 		pageidx = current_offset >> PAGE_SHIFT;
    958      1.147        ad 		swslot = uao_find_swslot(uobj, pageidx);
    959      1.147        ad 		ptmp = uao_pagealloc(uobj, current_offset,
    960      1.147        ad 		    swslot != 0 || overwrite ? 0 : UVM_PGA_ZERO);
    961      1.147        ad 
    962      1.147        ad 		/* out of RAM? */
    963      1.147        ad 		if (ptmp == NULL) {
    964      1.147        ad 			rw_exit(uobj->vmobjlock);
    965      1.150    simonb 			UVMHIST_LOG(pdhist, "sleeping, ptmp == NULL",0,0,0,0);
    966      1.147        ad 			uvm_wait("uao_getpage");
    967      1.147        ad 			rw_enter(uobj->vmobjlock, RW_WRITER);
    968      1.147        ad 			uvm_page_array_clear(&a);
    969      1.147        ad 			continue;
    970      1.147        ad 		}
    971        1.5       mrg 
    972        1.5       mrg 		/*
    973      1.148     skrll  		 * if swslot == 0, page hasn't existed before and is zeroed.
    974      1.142        ad  		 * otherwise we have a "fake/busy/clean" page that we just
    975      1.142        ad  		 * allocated.  do the needed "i/o", reading from swap.
    976        1.5       mrg  		 */
    977       1.46       chs 
    978      1.142        ad 		if (swslot != 0) {
    979       1.72      yamt #if defined(VMSWAP)
    980       1.72      yamt 			int error;
    981       1.72      yamt 
    982      1.126  pgoyette 			UVMHIST_LOG(pdhist, "pagein from swslot %jd",
    983        1.5       mrg 			     swslot, 0,0,0);
    984        1.5       mrg 
    985        1.5       mrg 			/*
    986        1.5       mrg 			 * page in the swapped-out page.
    987        1.5       mrg 			 * unlock object for i/o, relock when done.
    988        1.5       mrg 			 */
    989       1.46       chs 
    990      1.151       chs 			uvm_page_array_clear(&a);
    991      1.135        ad 			rw_exit(uobj->vmobjlock);
    992       1.46       chs 			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
    993      1.135        ad 			rw_enter(uobj->vmobjlock, RW_WRITER);
    994        1.5       mrg 
    995        1.5       mrg 			/*
    996        1.5       mrg 			 * I/O done.  check for errors.
    997        1.5       mrg 			 */
    998       1.46       chs 
    999       1.46       chs 			if (error != 0) {
   1000      1.126  pgoyette 				UVMHIST_LOG(pdhist, "<- done (error=%jd)",
   1001       1.46       chs 				    error,0,0,0);
   1002       1.27       chs 
   1003       1.27       chs 				/*
   1004       1.27       chs 				 * remove the swap slot from the aobj
   1005       1.27       chs 				 * and mark the aobj as having no real slot.
   1006       1.27       chs 				 * don't free the swap slot, thus preventing
   1007       1.27       chs 				 * it from being used again.
   1008       1.27       chs 				 */
   1009       1.46       chs 
   1010      1.118     rmind 				swslot = uao_set_swslot(uobj, pageidx,
   1011      1.118     rmind 				    SWSLOT_BAD);
   1012       1.57        pk 				if (swslot > 0) {
   1013       1.45       chs 					uvm_swap_markbad(swslot, 1);
   1014       1.45       chs 				}
   1015       1.27       chs 
   1016        1.5       mrg 				uvm_pagefree(ptmp);
   1017      1.135        ad 				rw_exit(uobj->vmobjlock);
   1018      1.142        ad 				UVMHIST_LOG(pdhist, "<- done (error)",
   1019      1.142        ad 				    error,lcv,0,0);
   1020      1.142        ad 				if (lcv != 0) {
   1021      1.142        ad 					uvm_page_unbusy(pps, lcv);
   1022      1.142        ad 				}
   1023      1.142        ad 				memset(pps, 0, maxpages * sizeof(pps[0]));
   1024      1.151       chs 				uvm_page_array_fini(&a);
   1025       1.46       chs 				return error;
   1026        1.5       mrg 			}
   1027       1.72      yamt #else /* defined(VMSWAP) */
   1028       1.72      yamt 			panic("%s: pagein", __func__);
   1029       1.72      yamt #endif /* defined(VMSWAP) */
   1030        1.5       mrg 		}
   1031        1.5       mrg 
   1032      1.134        ad 		/*
    1033      1.134        ad 		 * note that we will allow the page to be writably mapped
   1034      1.144        ad 		 * (!PG_RDONLY) regardless of access_type.  if overwrite,
   1035      1.144        ad 		 * the page can be modified through an unmanaged mapping
   1036      1.144        ad 		 * so mark it dirty up front.
   1037      1.134        ad 		 */
   1038      1.144        ad 		if (overwrite) {
   1039      1.144        ad 			uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_DIRTY);
   1040      1.144        ad 		} else {
   1041      1.144        ad 			uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_UNKNOWN);
   1042      1.144        ad 		}
   1043       1.78      yamt 
   1044       1.41       chs 		/*
   1045        1.5       mrg  		 * we got the page!   clear the fake flag (indicates valid
   1046        1.5       mrg 		 * data now in page) and plug into our result array.   note
   1047       1.41       chs 		 * that page is still busy.
   1048        1.5       mrg  		 *
    1049        1.5       mrg  		 * it is the caller's job to:
   1050        1.5       mrg  		 * => check if the page is released
   1051        1.5       mrg  		 * => unbusy the page
   1052        1.5       mrg  		 * => activate the page
   1053        1.5       mrg  		 */
   1054      1.134        ad 		KASSERT(uvm_pagegetdirty(ptmp) != UVM_PAGE_STATUS_CLEAN);
   1055      1.134        ad 		KASSERT((ptmp->flags & PG_FAKE) != 0);
   1056      1.147        ad 		KASSERT(ptmp->offset == current_offset);
   1057       1.46       chs 		ptmp->flags &= ~PG_FAKE;
   1058      1.147        ad 		pps[lcv++] = ptmp;
   1059      1.147        ad 		current_offset += PAGE_SIZE;
   1060       1.46       chs 	}
   1061      1.147        ad 	uvm_page_array_fini(&a);
   1062        1.1       mrg 
   1063        1.1       mrg 	/*
   1064        1.5       mrg  	 * finally, unlock object and return.
   1065        1.5       mrg  	 */
   1066        1.1       mrg 
   1067       1.76      yamt done:
   1068      1.135        ad 	rw_exit(uobj->vmobjlock);
   1069        1.5       mrg 	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
   1070       1.46       chs 	return 0;
   1071        1.1       mrg }
   1072        1.1       mrg 
   1073       1.72      yamt #if defined(VMSWAP)
   1074       1.72      yamt 
   1075        1.1       mrg /*
   1076       1.18       chs  * uao_dropswap:  release any swap resources from this aobj page.
   1077       1.41       chs  *
   1078       1.18       chs  * => aobj must be locked or have a reference count of 0.
   1079       1.18       chs  */
   1080       1.18       chs 
   1081       1.18       chs void
   1082       1.67   thorpej uao_dropswap(struct uvm_object *uobj, int pageidx)
   1083       1.18       chs {
   1084       1.18       chs 	int slot;
   1085       1.18       chs 
   1086      1.141        ad 	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
   1087      1.141        ad 
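                              	/*
                              	 * clear the slot in the aobj.  uao_set_swslot() returns the
                              	 * previous value; if that slot was in use, free it.
                              	 */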
   1088       1.18       chs 	slot = uao_set_swslot(uobj, pageidx, 0);
   1089       1.18       chs 	if (slot) {
   1090       1.18       chs 		uvm_swap_free(slot, 1);
   1091       1.18       chs 	}
   1092       1.27       chs }
   1093       1.27       chs 
   1094       1.27       chs /*
    1095       1.27       chs  * page in every page, of every aobj, whose swap slot falls in the given range.
   1096       1.41       chs  *
   1097       1.27       chs  * => nothing should be locked.
   1098       1.87   thorpej  * => returns true if pagein was aborted due to lack of memory.
   1099       1.27       chs  */
   1100       1.46       chs 
   1101       1.85   thorpej bool
   1102       1.67   thorpej uao_swap_off(int startslot, int endslot)
   1103       1.27       chs {
   1104      1.118     rmind 	struct uvm_aobj *aobj;
   1105       1.27       chs 
   1106       1.27       chs 	/*
   1107      1.118     rmind 	 * Walk the list of all anonymous UVM objects.  Grab the first.
   1108       1.27       chs 	 */
   1109      1.118     rmind 	mutex_enter(&uao_list_lock);
   1110      1.118     rmind 	if ((aobj = LIST_FIRST(&uao_list)) == NULL) {
   1111      1.118     rmind 		mutex_exit(&uao_list_lock);
   1112      1.118     rmind 		return false;
   1113      1.118     rmind 	}
   1114      1.118     rmind 	uao_reference(&aobj->u_obj);
   1115       1.27       chs 
   1116      1.118     rmind 	do {
   1117      1.118     rmind 		struct uvm_aobj *nextaobj;
   1118      1.118     rmind 		bool rv;
   1119       1.27       chs 
   1120       1.27       chs 		/*
   1121      1.118     rmind 		 * Prefetch the next object and immediately hold a reference
    1122      1.118     rmind 		 * on it, so that neither the current nor the next entry can
   1123      1.118     rmind 		 * disappear while we are iterating.
   1124       1.27       chs 		 */
   1125      1.118     rmind 		if ((nextaobj = LIST_NEXT(aobj, u_list)) != NULL) {
   1126      1.118     rmind 			uao_reference(&nextaobj->u_obj);
   1127       1.27       chs 		}
   1128       1.90        ad 		mutex_exit(&uao_list_lock);
   1129       1.27       chs 
   1130       1.27       chs 		/*
   1131      1.118     rmind 		 * Page in all pages in the swap slot range.
   1132       1.27       chs 		 */
   1133      1.135        ad 		rw_enter(aobj->u_obj.vmobjlock, RW_WRITER);
   1134      1.118     rmind 		rv = uao_pagein(aobj, startslot, endslot);
   1135      1.135        ad 		rw_exit(aobj->u_obj.vmobjlock);
   1136       1.46       chs 
   1137      1.118     rmind 		/* Drop the reference of the current object. */
   1138      1.118     rmind 		uao_detach(&aobj->u_obj);
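                              		/*
                              		 * if the pagein was aborted (e.g. out of memory), also drop
                              		 * the reference taken on the prefetched next object and stop.
                              		 */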
   1139       1.27       chs 		if (rv) {
   1140      1.118     rmind 			if (nextaobj) {
   1141      1.118     rmind 				uao_detach(&nextaobj->u_obj);
   1142      1.118     rmind 			}
   1143       1.27       chs 			return rv;
   1144       1.27       chs 		}
   1145       1.27       chs 
   1146      1.118     rmind 		aobj = nextaobj;
   1147       1.90        ad 		mutex_enter(&uao_list_lock);
   1148      1.118     rmind 	} while (aobj);
   1149       1.27       chs 
   1150       1.90        ad 	mutex_exit(&uao_list_lock);
   1151       1.87   thorpej 	return false;
   1152       1.27       chs }
   1153       1.27       chs 
   1154       1.27       chs /*
   1155       1.27       chs  * page in any pages from aobj in the given range.
   1156       1.27       chs  *
   1157       1.27       chs  * => aobj must be locked and is returned locked.
   1158       1.87   thorpej  * => returns true if pagein was aborted due to lack of memory.
   1159       1.27       chs  */
   1160       1.85   thorpej static bool
   1161       1.67   thorpej uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
   1162       1.27       chs {
   1163       1.85   thorpej 	bool rv;
   1164       1.27       chs 
   1165       1.27       chs 	if (UAO_USES_SWHASH(aobj)) {
   1166       1.27       chs 		struct uao_swhash_elt *elt;
   1167       1.65  christos 		int buck;
   1168       1.27       chs 
   1169       1.27       chs restart:
   1170       1.65  christos 		for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
   1171       1.65  christos 			for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
   1172       1.27       chs 			     elt != NULL;
   1173       1.27       chs 			     elt = LIST_NEXT(elt, list)) {
   1174       1.27       chs 				int i;
   1175       1.27       chs 
   1176       1.27       chs 				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
   1177       1.27       chs 					int slot = elt->slots[i];
   1178       1.27       chs 
   1179       1.27       chs 					/*
   1180       1.27       chs 					 * if the slot isn't in range, skip it.
   1181       1.27       chs 					 */
   1182       1.46       chs 
   1183       1.41       chs 					if (slot < startslot ||
   1184       1.27       chs 					    slot >= endslot) {
   1185       1.27       chs 						continue;
   1186       1.27       chs 					}
   1187       1.27       chs 
   1188       1.27       chs 					/*
   1189       1.27       chs 					 * process the page,
    1190       1.27       chs 					 * then start over on this object
   1191       1.27       chs 					 * since the swhash elt
   1192       1.27       chs 					 * may have been freed.
   1193       1.27       chs 					 */
   1194       1.46       chs 
   1195       1.27       chs 					rv = uao_pagein_page(aobj,
   1196       1.27       chs 					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
   1197       1.27       chs 					if (rv) {
   1198       1.27       chs 						return rv;
   1199       1.27       chs 					}
   1200       1.27       chs 					goto restart;
   1201       1.27       chs 				}
   1202       1.27       chs 			}
   1203       1.27       chs 		}
   1204       1.27       chs 	} else {
   1205       1.27       chs 		int i;
   1206       1.27       chs 
   1207       1.27       chs 		for (i = 0; i < aobj->u_pages; i++) {
   1208       1.27       chs 			int slot = aobj->u_swslots[i];
   1209       1.27       chs 
   1210       1.27       chs 			/*
   1211       1.27       chs 			 * if the slot isn't in range, skip it
   1212       1.27       chs 			 */
   1213       1.46       chs 
   1214       1.27       chs 			if (slot < startslot || slot >= endslot) {
   1215       1.27       chs 				continue;
   1216       1.27       chs 			}
   1217       1.27       chs 
   1218       1.27       chs 			/*
   1219       1.27       chs 			 * process the page.
   1220       1.27       chs 			 */
   1221       1.46       chs 
   1222       1.27       chs 			rv = uao_pagein_page(aobj, i);
   1223       1.27       chs 			if (rv) {
   1224       1.27       chs 				return rv;
   1225       1.27       chs 			}
   1226       1.27       chs 		}
   1227       1.27       chs 	}
   1228       1.27       chs 
   1229       1.87   thorpej 	return false;
   1230       1.27       chs }
   1231       1.27       chs 
   1232       1.27       chs /*
   1233      1.117     rmind  * uao_pagein_page: page in a single page from an anonymous UVM object.
   1234       1.27       chs  *
   1235      1.117     rmind  * => Returns true if pagein was aborted due to lack of memory.
   1236      1.117     rmind  * => Object must be locked and is returned locked.
   1237       1.27       chs  */
   1238       1.46       chs 
   1239       1.85   thorpej static bool
   1240       1.67   thorpej uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
   1241       1.27       chs {
   1242      1.117     rmind 	struct uvm_object *uobj = &aobj->u_obj;
   1243       1.27       chs 	struct vm_page *pg;
   1244       1.57        pk 	int rv, npages;
   1245       1.27       chs 
   1246       1.27       chs 	pg = NULL;
   1247       1.27       chs 	npages = 1;
   1248      1.117     rmind 
   1249      1.135        ad 	KASSERT(rw_write_held(uobj->vmobjlock));
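                              	/*
                              	 * fetch the page synchronously.  uao_get() releases the object
                              	 * lock while sleeping for i/o, hence the relock below.
                              	 */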
   1250      1.128   msaitoh 	rv = uao_get(uobj, (voff_t)pageidx << PAGE_SHIFT, &pg, &npages,
   1251      1.117     rmind 	    0, VM_PROT_READ | VM_PROT_WRITE, 0, PGO_SYNCIO);
   1252       1.27       chs 
   1253       1.27       chs 	/*
   1254       1.27       chs 	 * relock and finish up.
   1255       1.27       chs 	 */
   1256       1.46       chs 
   1257      1.135        ad 	rw_enter(uobj->vmobjlock, RW_WRITER);
   1258       1.27       chs 	switch (rv) {
   1259       1.40       chs 	case 0:
   1260       1.27       chs 		break;
   1261       1.27       chs 
   1262       1.40       chs 	case EIO:
   1263       1.40       chs 	case ERESTART:
   1264       1.46       chs 
   1265       1.27       chs 		/*
   1266       1.27       chs 		 * nothing more to do on errors.
   1267       1.40       chs 		 * ERESTART can only mean that the anon was freed,
   1268       1.27       chs 		 * so again there's nothing to do.
   1269       1.27       chs 		 */
   1270       1.46       chs 
   1271       1.87   thorpej 		return false;
   1272       1.59        pk 
   1273       1.59        pk 	default:
   1274       1.87   thorpej 		return true;
   1275       1.27       chs 	}
   1276       1.27       chs 
   1277       1.27       chs 	/*
   1278       1.27       chs 	 * ok, we've got the page now.
   1279       1.27       chs 	 * mark it as dirty, clear its swslot and un-busy it.
   1280       1.27       chs 	 */
   1281       1.57        pk 	uao_dropswap(&aobj->u_obj, pageidx);
   1282       1.27       chs 
   1283       1.27       chs 	/*
   1284       1.80      yamt 	 * make sure it's on a page queue.
   1285       1.27       chs 	 */
   1286      1.133        ad 	uvm_pagelock(pg);
   1287      1.131        ad 	uvm_pageenqueue(pg);
   1288      1.138        ad 	uvm_pagewakeup(pg);
   1289      1.133        ad 	uvm_pageunlock(pg);
   1290       1.56      yamt 
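                              	/*
                              	 * the swap slot was freed above, so the in-memory copy is now
                              	 * the only one.  mark the page dirty so it gets a fresh slot
                              	 * if it is ever paged out again.
                              	 */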
   1291      1.138        ad 	pg->flags &= ~(PG_BUSY|PG_FAKE);
   1292      1.134        ad 	uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
   1293      1.138        ad 	UVM_PAGE_OWN(pg, NULL);
   1294       1.56      yamt 
   1295       1.87   thorpej 	return false;
   1296        1.1       mrg }
   1297       1.72      yamt 
   1298       1.75      yamt /*
   1299       1.75      yamt  * uao_dropswap_range: drop swapslots in the range.
   1300       1.75      yamt  *
   1301       1.75      yamt  * => aobj must be locked and is returned locked.
   1302       1.75      yamt  * => start is inclusive.  end is exclusive.
   1303       1.75      yamt  */
   1304       1.75      yamt 
   1305       1.75      yamt void
   1306       1.75      yamt uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
   1307       1.75      yamt {
   1308       1.75      yamt 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
   1309      1.117     rmind 	int swpgonlydelta = 0;
   1310       1.75      yamt 
   1311      1.141        ad 	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
   1312      1.135        ad 	KASSERT(rw_write_held(uobj->vmobjlock));
   1313       1.75      yamt 
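                              	/*
                              	 * an end of zero means "drop everything to the end of the object".
                              	 */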
   1314       1.75      yamt 	if (end == 0) {
   1315       1.75      yamt 		end = INT64_MAX;
   1316       1.75      yamt 	}
   1317       1.75      yamt 
   1318       1.75      yamt 	if (UAO_USES_SWHASH(aobj)) {
   1319       1.75      yamt 		int i, hashbuckets = aobj->u_swhashmask + 1;
   1320       1.75      yamt 		voff_t taghi;
   1321       1.75      yamt 		voff_t taglo;
   1322       1.75      yamt 
   1323       1.75      yamt 		taglo = UAO_SWHASH_ELT_TAG(start);
   1324       1.75      yamt 		taghi = UAO_SWHASH_ELT_TAG(end);
   1325       1.75      yamt 
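                              		/*
                              		 * only hash elements whose tag falls within [taglo, taghi]
                              		 * can hold slots for the requested range; elements at the
                              		 * boundary tags are trimmed below via startidx/endidx.
                              		 */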
   1326       1.75      yamt 		for (i = 0; i < hashbuckets; i++) {
   1327       1.75      yamt 			struct uao_swhash_elt *elt, *next;
   1328       1.75      yamt 
   1329       1.75      yamt 			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
   1330       1.75      yamt 			     elt != NULL;
   1331       1.75      yamt 			     elt = next) {
   1332       1.75      yamt 				int startidx, endidx;
   1333       1.75      yamt 				int j;
   1334       1.75      yamt 
   1335       1.75      yamt 				next = LIST_NEXT(elt, list);
   1336       1.75      yamt 
   1337       1.75      yamt 				if (elt->tag < taglo || taghi < elt->tag) {
   1338       1.75      yamt 					continue;
   1339       1.75      yamt 				}
   1340       1.75      yamt 
   1341       1.75      yamt 				if (elt->tag == taglo) {
   1342       1.75      yamt 					startidx =
   1343       1.75      yamt 					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
   1344       1.75      yamt 				} else {
   1345       1.75      yamt 					startidx = 0;
   1346       1.75      yamt 				}
   1347       1.75      yamt 
   1348       1.75      yamt 				if (elt->tag == taghi) {
   1349       1.75      yamt 					endidx =
   1350       1.75      yamt 					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
   1351       1.75      yamt 				} else {
   1352       1.75      yamt 					endidx = UAO_SWHASH_CLUSTER_SIZE;
   1353       1.75      yamt 				}
   1354       1.75      yamt 
   1355       1.75      yamt 				for (j = startidx; j < endidx; j++) {
   1356       1.75      yamt 					int slot = elt->slots[j];
   1357       1.75      yamt 
   1358       1.75      yamt 					KASSERT(uvm_pagelookup(&aobj->u_obj,
   1359       1.75      yamt 					    (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
   1360       1.75      yamt 					    + j) << PAGE_SHIFT) == NULL);
   1361       1.75      yamt 					if (slot > 0) {
   1362       1.75      yamt 						uvm_swap_free(slot, 1);
   1363       1.75      yamt 						swpgonlydelta++;
   1364       1.75      yamt 						KASSERT(elt->count > 0);
   1365       1.75      yamt 						elt->slots[j] = 0;
   1366       1.75      yamt 						elt->count--;
   1367       1.75      yamt 					}
   1368       1.75      yamt 				}
   1369       1.75      yamt 
   1370       1.75      yamt 				if (elt->count == 0) {
   1371       1.75      yamt 					LIST_REMOVE(elt, list);
   1372       1.75      yamt 					pool_put(&uao_swhash_elt_pool, elt);
   1373       1.75      yamt 				}
   1374       1.75      yamt 			}
   1375       1.75      yamt 		}
   1376       1.75      yamt 	} else {
   1377       1.75      yamt 		int i;
   1378       1.75      yamt 
   1379       1.75      yamt 		if (aobj->u_pages < end) {
   1380       1.75      yamt 			end = aobj->u_pages;
   1381       1.75      yamt 		}
   1382       1.75      yamt 		for (i = start; i < end; i++) {
   1383       1.75      yamt 			int slot = aobj->u_swslots[i];
   1384       1.75      yamt 
   1385       1.75      yamt 			if (slot > 0) {
   1386       1.75      yamt 				uvm_swap_free(slot, 1);
   1387       1.75      yamt 				swpgonlydelta++;
   1388       1.75      yamt 			}
   1389       1.75      yamt 		}
   1390       1.75      yamt 	}
   1391       1.75      yamt 
   1392       1.75      yamt 	/*
   1393       1.75      yamt 	 * adjust the counter of pages only in swap for all
   1394       1.75      yamt 	 * the swap slots we've freed.
   1395       1.75      yamt 	 */
   1396       1.75      yamt 
   1397       1.75      yamt 	if (swpgonlydelta > 0) {
   1398       1.75      yamt 		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
   1399      1.129        ad 		atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
   1400       1.75      yamt 	}
   1401       1.75      yamt }
   1402       1.75      yamt 
   1403       1.72      yamt #endif /* defined(VMSWAP) */
   1404