uvm_aobj.c revision 1.150
/*	$NetBSD: uvm_aobj.c,v 1.150 2020/08/19 07:29:00 simonb Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */

/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.150 2020/08/19 07:29:00 simonb Exp $");

#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page_array.h>

/*
 * An anonymous UVM object (aobj) manages anonymous memory.  In addition to
 * keeping the list of resident pages, it may also keep a list of allocated
 * swap blocks.  Depending on the size of the object, this list is either
 * stored in an array (small objects) or in a hash table (large objects).
 *
 * Lock order
 *
 *	uao_list_lock ->
 *		uvm_object::vmobjlock
 */

/*
 * Note: for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages, which shall be a power of two.
 */

#define	UAO_SWHASH_CLUSTER_SHIFT	4
#define	UAO_SWHASH_CLUSTER_SIZE		(1 << UAO_SWHASH_CLUSTER_SHIFT)

/* Get the "tag" for this page index. */
#define	UAO_SWHASH_ELT_TAG(idx)		((idx) >> UAO_SWHASH_CLUSTER_SHIFT)
#define UAO_SWHASH_ELT_PAGESLOT_IDX(idx) \
    ((idx) & (UAO_SWHASH_CLUSTER_SIZE - 1))

/* Given an ELT and a page index, find the swap slot. */
#define	UAO_SWHASH_ELT_PAGESLOT(elt, idx) \
    ((elt)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(idx)])

/* Given an ELT, return its pageidx base. */
#define	UAO_SWHASH_ELT_PAGEIDX_BASE(elt) \
    ((elt)->tag << UAO_SWHASH_CLUSTER_SHIFT)

/* The hash function. */
#define	UAO_SWHASH_HASH(aobj, idx) \
    (&(aobj)->u_swhash[(((idx) >> UAO_SWHASH_CLUSTER_SHIFT) \
    & (aobj)->u_swhashmask)])

/*
 * The threshold which determines whether we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */
#define	UAO_SWHASH_THRESHOLD		(UAO_SWHASH_CLUSTER_SIZE * 4)
#define	UAO_USES_SWHASH(aobj) \
    ((aobj)->u_pages > UAO_SWHASH_THRESHOLD)

/* The number of buckets in a hash, with an upper bound. */
#define	UAO_SWHASH_MAXBUCKETS		256
#define	UAO_SWHASH_BUCKETS(aobj) \
    (MIN((aobj)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, UAO_SWHASH_MAXBUCKETS))
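
/*
 * Worked example: with UAO_SWHASH_CLUSTER_SHIFT == 4, page index 0x35
 * falls in the cluster with tag 0x3 (0x35 >> 4), at slot index 0x5
 * (0x35 & 0xf) within that cluster's elt.  An aobj switches from the
 * plain u_swslots array to the hash table once it grows beyond
 * UAO_SWHASH_THRESHOLD, i.e. 16 * 4 == 64 pages.
 */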

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures.
 * Note: pages for this pool must not come from a pageable kernel map.
 */
static struct pool	uao_swhash_elt_pool	__cacheline_aligned;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, #pages, #refs */
	pgoff_t u_pages;	 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
	int u_freelist;		  /* freelist to allocate pages from */
};

static void	uao_free(struct uvm_aobj *);
static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
		    int *, int, vm_prot_t, int, int);
static int	uao_put(struct uvm_object *, voff_t, voff_t, int);

#if defined(VMSWAP)
static struct uao_swhash_elt *uao_find_swhash_elt
    (struct uvm_aobj *, int, bool);

static bool uao_pagein(struct uvm_aobj *, int, int);
static bool uao_pagein_page(struct uvm_aobj *, int);
#endif /* defined(VMSWAP) */

static struct vm_page	*uao_pagealloc(struct uvm_object *, voff_t, int);

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

const struct uvm_pagerops aobj_pager = {
	.pgo_reference = uao_reference,
	.pgo_detach = uao_detach,
	.pgo_get = uao_get,
	.pgo_put = uao_put,
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list	__cacheline_aligned;
static kmutex_t		uao_list_lock		__cacheline_aligned;

/*
 * hash table/array related functions
 */

#if defined(VMSWAP)

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket, then initialize and insert it
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(struct uvm_object *uobj, int pageidx)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return 0;

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		elt = uao_find_swhash_elt(aobj, pageidx, false);
		return elt ? UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) : 0;
	}

	/*
	 * otherwise, look in the array
	 */

	return aobj->u_swslots[pageidx];
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */

int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pdhist, "aobj %#jx pageidx %jd slot %jd",
	    (uintptr_t)aobj, pageidx, slot, 0);

	KASSERT(rw_write_held(uobj->vmobjlock) || uobj->uo_refs == 0);
	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	/*
	 * if noswap flag is set, then we can't set a non-zero slot.
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		KASSERTMSG(slot == 0, "uao_set_swslot: no swap object");
		return 0;
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return oldslot;
}

#endif /* defined(VMSWAP) */

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(struct uvm_aobj *aobj)
{
	struct uvm_object *uobj = &aobj->u_obj;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));
	uao_dropswap_range(uobj, 0, 0);
	rw_exit(uobj->vmobjlock);

#if defined(VMSWAP)
	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * free the hash table itself.
		 */

		hashdone(aobj->u_swhash, HASH_LIST, aobj->u_swhashmask);
	} else {

		/*
		 * free the array itself.
		 */

		kmem_free(aobj->u_swslots, aobj->u_pages * sizeof(int));
	}
#endif /* defined(VMSWAP) */

	/*
	 * finally free the aobj itself
	 */

	uvm_obj_destroy(uobj, true);
	kmem_free(aobj, sizeof(struct uvm_aobj));
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
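 *
 * Illustrative usage (a sketch, not a caller from this file):
 *
 *	struct uvm_object *uobj = uao_create(size, 0);
 *	...
 *	uao_detach(uobj);	(drops the initial reference)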
 */

struct uvm_object *
uao_create(voff_t size, int flags)
{
	static struct uvm_aobj kernel_object_store;
	static krwlock_t kernel_object_lock __cacheline_aligned;
	static int kobj_alloced __diagused = 0;
	pgoff_t pages = round_page((uint64_t)size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;
	int refs;

	/*
	 * Allocate a new aobj, unless kernel object is requested.
	 */

	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
		refs = 0xdeadbeaf; /* XXX: gcc */
	} else {
		aobj = kmem_alloc(sizeof(struct uvm_aobj), KM_SLEEP);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		refs = 1;
	}

	/*
	 * no freelist by default
	 */

	aobj->u_freelist = VM_NFREELIST;

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about
	 * locking, since we are still booting and should be the only
	 * thread around.
	 */

	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
#if defined(VMSWAP)
		const int kernswap = (flags & UAO_FLAG_KERNSWAP) != 0;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, kernswap ? false : true,
			    &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = kmem_zalloc(pages * sizeof(int),
			    kernswap ? KM_NOSLEEP : KM_SLEEP);
			if (aobj->u_swslots == NULL)
				panic("uao_create: swslots allocation failed");
		}
#endif /* defined(VMSWAP) */

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return &aobj->u_obj;
		}
	}

	/*
	 * Initialise UVM object.
	 */

	const bool kernobj = (flags & UAO_FLAG_KERNOBJ) != 0;
	uvm_obj_init(&aobj->u_obj, &aobj_pager, !kernobj, refs);
	if (__predict_false(kernobj)) {
		/* Initialisation only once, for UAO_FLAG_KERNOBJ. */
		rw_init(&kernel_object_lock);
		uvm_obj_setlock(&aobj->u_obj, &kernel_object_lock);
	}

	/*
	 * now that aobj is ready, add it to the global list
	 */

	mutex_enter(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	mutex_exit(&uao_list_lock);
	return &aobj->u_obj;
}

/*
 * uao_set_pgfl: allocate pages only from the specified freelist.
 *
 * => must be called before any pages are allocated for the object.
 * => reset by setting it to VM_NFREELIST, meaning any freelist.
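 *
 * Illustrative usage (a sketch; the freelist constant is platform
 * specific and only an assumption here):
 *
 *	uobj = uao_create(size, 0);
 *	uao_set_pgfl(uobj, VM_FREELIST_FIRST16);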
 */

void
uao_set_pgfl(struct uvm_object *uobj, int freelist)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	KASSERTMSG((0 <= freelist), "invalid freelist %d", freelist);
	KASSERTMSG((freelist <= VM_NFREELIST), "invalid freelist %d",
	    freelist);

	aobj->u_freelist = freelist;
}

/*
 * uao_pagealloc: allocate a page for aobj.
 */

static inline struct vm_page *
uao_pagealloc(struct uvm_object *uobj, voff_t offset, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

	if (__predict_true(aobj->u_freelist == VM_NFREELIST))
		return uvm_pagealloc(uobj, offset, NULL, flags);
	else
		return uvm_pagealloc_strat(uobj, offset, NULL, flags,
		    UVM_PGA_STRAT_ONLY, aobj->u_freelist);
}

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = true;
	LIST_INIT(&uao_list);
	mutex_init(&uao_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", NULL, IPL_VM);
}

/*
 * uao_reference: hold a reference to an anonymous UVM object.
 */
void
uao_reference(struct uvm_object *uobj)
{
	/* Kernel object is persistent. */
	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		return;
	}
	atomic_inc_uint(&uobj->uo_refs);
}

/*
 * uao_detach: drop a reference to an anonymous UVM object.
 */
void
uao_detach(struct uvm_object *uobj)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uvm_page_array a;
	struct vm_page *pg;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	/*
	 * Detaching from kernel object is a NOP.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	/*
	 * Drop the reference.  If it was the last one, destroy the object.
	 */

	KASSERT(uobj->uo_refs > 0);
	UVMHIST_LOG(maphist,"  (uobj=%#jx)  ref=%jd",
	    (uintptr_t)uobj, uobj->uo_refs, 0, 0);
	if (atomic_dec_uint_nv(&uobj->uo_refs) > 0) {
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * Remove the aobj from the global list.
	 */

	mutex_enter(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	mutex_exit(&uao_list_lock);

	/*
	 * Free all the pages left in the aobj.  For each page, when the
	 * page is no longer busy (and thus after any disk I/O that it is
	 * involved in is complete), release any swap resources and free
	 * the page itself.
	 */
	uvm_page_array_init(&a, uobj, 0);
	rw_enter(uobj->vmobjlock, RW_WRITER);
	while ((pg = uvm_page_array_fill_and_peek(&a, 0, 0)) != NULL) {
		uvm_page_array_advance(&a);
		pmap_page_protect(pg, VM_PROT_NONE);
		if (pg->flags & PG_BUSY) {
			uvm_pagewait(pg, uobj->vmobjlock, "uao_det");
			uvm_page_array_clear(&a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	uvm_page_array_fini(&a);

	/*
	 * Finally, free the anonymous UVM object itself.
	 */

	uao_free(aobj);
}

/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => we return 0 unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
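 *
 * Illustrative call (a sketch; in practice this runs via the pager op
 * uobj->pgops->pgo_put):
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	error = (*uobj->pgops->pgo_put)(uobj, 0, 0,
 *	    PGO_ALLPAGES | PGO_FREE);
 *
 *	(the object lock has been dropped when this returns)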
 */

static int
uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uvm_page_array a;
	struct vm_page *pg;
	voff_t curoff;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_write_held(uobj->vmobjlock));

	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
	} else {
		start = trunc_page(start);
		if (stop == 0) {
			stop = aobj->u_pages << PAGE_SHIFT;
		} else {
			stop = round_page(stop);
		}
		if (stop > (uint64_t)(aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_put: strange, got an out of range "
			    "flush %#jx > %#jx (fixed)\n",
			    (uintmax_t)stop,
			    (uintmax_t)(aobj->u_pages << PAGE_SHIFT));
			stop = aobj->u_pages << PAGE_SHIFT;
		}
	}
	UVMHIST_LOG(maphist,
	    " flush start=%#jx, stop=%#jx, flags=%#jx",
	    start, stop, flags, 0);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */

	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		rw_exit(uobj->vmobjlock);
		return 0;
	}

	/* locked: uobj */
	uvm_page_array_init(&a, uobj, 0);
	curoff = start;
	while ((pg = uvm_page_array_fill_and_peek(&a, curoff, 0)) != NULL) {
		if (pg->offset >= stop) {
			break;
		}

		/*
		 * wait and try again if the page is busy.
		 */

		if (pg->flags & PG_BUSY) {
			uvm_pagewait(pg, uobj->vmobjlock, "uao_put");
			uvm_page_array_clear(&a);
			rw_enter(uobj->vmobjlock, RW_WRITER);
			continue;
		}
		uvm_page_array_advance(&a);
		curoff = pg->offset + PAGE_SIZE;

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */

		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			uvm_pagelock(pg);
			uvm_pagedeactivate(pg);
			uvm_pageunlock(pg);
			break;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */

			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/*
			 * free the swap slot and the page.
			 */

			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * freeing swapslot here is not strictly necessary.
			 * however, leaving it here doesn't save much
			 * because we need to update swap accounting anyway.
			 */

			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			uvm_pagefree(pg);
			break;

		default:
			panic("%s: impossible", __func__);
		}
	}
	rw_exit(uobj->vmobjlock);
	uvm_page_array_fini(&a);
	return 0;
}

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * case 1 can be handled with PGO_LOCKED, cases 2 and 3 cannot.
 * so, if the "center" page hits case 2/3 then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
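 *
 * Illustrative call pattern (a sketch; the fault handler drives this
 * through the pgo_get pager op in practice):
 *
 *	npages = 1;
 *	rw_enter(uobj->vmobjlock, RW_WRITER);
 *	error = (*uobj->pgops->pgo_get)(uobj, off, &pg, &npages, 0,
 *	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
 *
 *	(on success the object has been unlocked and pg is returned busy)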
 */

static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
	voff_t current_offset;
	struct vm_page *ptmp;
	int lcv, gotpages, maxpages, swslot, pageidx;
	bool overwrite = ((flags & PGO_OVERWRITE) != 0);
	struct uvm_page_array a;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pdhist, "aobj=%#jx offset=%jd, flags=%jd",
		    (uintptr_t)uobj, offset, flags,0);

	/*
	 * the object must be locked.  it can only be a read lock when
	 * processing a read fault with PGO_LOCKED.
	 */

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
	KASSERT(rw_lock_held(uobj->vmobjlock));
	KASSERT(rw_write_held(uobj->vmobjlock) ||
	   ((flags & PGO_LOCKED) != 0 && (access_type & VM_PROT_WRITE) == 0));

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		uvm_page_array_init(&a, uobj, 0);
		gotpages = 0;	/* # of pages we got so far */
		for (lcv = 0; lcv < maxpages; lcv++) {
			ptmp = uvm_page_array_fill_and_peek(&a,
			    offset + (lcv << PAGE_SHIFT), maxpages);
			if (ptmp == NULL) {
				break;
			}
			KASSERT(ptmp->offset >= offset);
			lcv = (ptmp->offset - offset) >> PAGE_SHIFT;
			if (lcv >= maxpages) {
				break;
			}
			uvm_page_array_advance(&a);

			/*
			 * to be useful we must get a non-busy page
			 */

			if ((ptmp->flags & PG_BUSY) != 0) {
				continue;
			}

			/*
			 * useful page: plug it in our result array
			 */

			KASSERT(uvm_pagegetdirty(ptmp) !=
			    UVM_PAGE_STATUS_CLEAN);
			pps[lcv] = ptmp;
			gotpages++;
		}
		uvm_page_array_fini(&a);

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%jd)",
		    (pps[centeridx] != NULL), 0,0,0);
		*npagesp = gotpages;
		return pps[centeridx] != NULL ? 0 : EBUSY;
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	if ((flags & PGO_SYNCIO) == 0) {
		goto done;
	}

	uvm_page_array_init(&a, uobj, 0);
	for (lcv = 0, current_offset = offset ; lcv < maxpages ;) {

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   we are
		 * ready to move on to the next page.
		 */

		ptmp = uvm_page_array_fill_and_peek(&a, current_offset,
		    maxpages - lcv);

		if (ptmp != NULL && ptmp->offset == current_offset) {
			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & PG_BUSY) != 0) {
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags %#jx\n",
				    ptmp->flags,0,0,0);
				uvm_pagewait(ptmp, uobj->vmobjlock, "uao_get");
				rw_enter(uobj->vmobjlock, RW_WRITER);
				uvm_page_array_clear(&a);
				continue;
			}

			/*
			 * if we get here then the page is resident and
			 * unbusy.  we busy it now (so we own it).  if
			 * overwriting, mark the page dirty up front as
			 * it will be zapped via an unmanaged mapping.
			 */

			KASSERT(uvm_pagegetdirty(ptmp) !=
			    UVM_PAGE_STATUS_CLEAN);
			if (overwrite) {
				uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_DIRTY);
			}
			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv++] = ptmp;
			current_offset += PAGE_SIZE;
			uvm_page_array_advance(&a);
			continue;
		} else {
			KASSERT(ptmp == NULL || ptmp->offset > current_offset);
		}

		/*
		 * not resident.  allocate a new busy/fake/clean page in the
		 * object.  if it's in swap we need to do I/O to fill in the
		 * data, otherwise the page needs to be cleared: if it's not
		 * destined to be overwritten, then zero it here and now.
		 */

		pageidx = current_offset >> PAGE_SHIFT;
		swslot = uao_find_swslot(uobj, pageidx);
		ptmp = uao_pagealloc(uobj, current_offset,
		    swslot != 0 || overwrite ? 0 : UVM_PGA_ZERO);

		/* out of RAM? */
		if (ptmp == NULL) {
			rw_exit(uobj->vmobjlock);
			UVMHIST_LOG(pdhist, "sleeping, ptmp == NULL",0,0,0,0);
			uvm_wait("uao_getpage");
			rw_enter(uobj->vmobjlock, RW_WRITER);
			uvm_page_array_clear(&a);
			continue;
		}

		/*
		 * if swslot == 0, page hasn't existed before and is zeroed.
		 * otherwise we have a "fake/busy/clean" page that we just
		 * allocated.  do the needed "i/o", reading from swap.
		 */

		if (swslot != 0) {
#if defined(VMSWAP)
			int error;

			UVMHIST_LOG(pdhist, "pagein from swslot %jd",
			     swslot, 0,0,0);

			/*
			 * page in the swapped-out page.
			 * unlock object for i/o, relock when done.
			 */

			rw_exit(uobj->vmobjlock);
			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
			rw_enter(uobj->vmobjlock, RW_WRITER);

			/*
			 * I/O done.  check for errors.
			 */

			if (error != 0) {
				UVMHIST_LOG(pdhist, "<- done (error=%jd)",
				    error,0,0,0);

				/*
				 * remove the swap slot from the aobj
				 * and mark the aobj as having no real slot.
				 * don't free the swap slot, thus preventing
				 * it from being used again.
				 */

				swslot = uao_set_swslot(uobj, pageidx,
				    SWSLOT_BAD);
				if (swslot > 0) {
					uvm_swap_markbad(swslot, 1);
				}

				uvm_pagefree(ptmp);
				rw_exit(uobj->vmobjlock);
				UVMHIST_LOG(pdhist, "<- done (error)",
				    error,lcv,0,0);
				if (lcv != 0) {
					uvm_page_unbusy(pps, lcv);
				}
				memset(pps, 0, maxpages * sizeof(pps[0]));
				return error;
			}
#else /* defined(VMSWAP) */
			panic("%s: pagein", __func__);
#endif /* defined(VMSWAP) */
		}

		/*
		 * note that we will allow the page being writably-mapped
		 * (!PG_RDONLY) regardless of access_type.  if overwrite,
		 * the page can be modified through an unmanaged mapping
		 * so mark it dirty up front.
		 */
		if (overwrite) {
			uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_DIRTY);
		} else {
			uvm_pagemarkdirty(ptmp, UVM_PAGE_STATUS_UNKNOWN);
		}

		/*
		 * we got the page!   clear the fake flag (indicates valid
		 * data now in page) and plug into our result array.   note
		 * that page is still busy.
		 *
		 * it is the caller's job to:
		 * => check if the page is released
		 * => unbusy the page
		 * => activate the page
		 */
		KASSERT(uvm_pagegetdirty(ptmp) != UVM_PAGE_STATUS_CLEAN);
		KASSERT((ptmp->flags & PG_FAKE) != 0);
		KASSERT(ptmp->offset == current_offset);
		ptmp->flags &= ~PG_FAKE;
		pps[lcv++] = ptmp;
		current_offset += PAGE_SIZE;
	}
	uvm_page_array_fini(&a);

	/*
	 * finally, unlock object and return.
	 */

done:
	rw_exit(uobj->vmobjlock);
	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
	return 0;
}

#if defined(VMSWAP)

/*
 * uao_dropswap:  release any swap resources from this aobj page.
 *
 * => aobj must be locked or have a reference count of 0.
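 *
 * Example, as used when freeing a page elsewhere in this file:
 *
 *	uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
 *	uvm_pagefree(pg);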
 */

void
uao_dropswap(struct uvm_object *uobj, int pageidx)
{
	int slot;

	KASSERT(UVM_OBJ_IS_AOBJ(uobj));

	slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot) {
		uvm_swap_free(slot, 1);
	}
}

/*
 * page in every page in every aobj that is paged-out to a range of swslots.
 *
 * => nothing should be locked.
 * => returns true if pagein was aborted due to lack of memory.
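 *
 * Illustrative caller (a sketch; the swap subsystem invokes this when a
 * swap device is being disabled, and the field names below are only
 * assumptions about that caller's bookkeeping):
 *
 *	if (uao_swap_off(sdp->swd_drumoffset,
 *	    sdp->swd_drumoffset + sdp->swd_drumsize))
 *		return ENOMEM;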
   1092   1.27       chs  */
   1093   1.46       chs 
   1094   1.85   thorpej bool
   1095   1.67   thorpej uao_swap_off(int startslot, int endslot)
   1096   1.27       chs {
   1097  1.118     rmind 	struct uvm_aobj *aobj;
   1098   1.27       chs 
   1099   1.27       chs 	/*
   1100  1.118     rmind 	 * Walk the list of all anonymous UVM objects.  Grab the first.
   1101   1.27       chs 	 */
   1102  1.118     rmind 	mutex_enter(&uao_list_lock);
   1103  1.118     rmind 	if ((aobj = LIST_FIRST(&uao_list)) == NULL) {
   1104  1.118     rmind 		mutex_exit(&uao_list_lock);
   1105  1.118     rmind 		return false;
   1106  1.118     rmind 	}
   1107  1.118     rmind 	uao_reference(&aobj->u_obj);
   1108   1.27       chs 
   1109  1.118     rmind 	do {
   1110  1.118     rmind 		struct uvm_aobj *nextaobj;
   1111  1.118     rmind 		bool rv;
   1112   1.27       chs 
   1113   1.27       chs 		/*
   1114  1.118     rmind 		 * Prefetch the next object and immediately hold a reference
   1115  1.118     rmind 		 * on it, so neither the current nor the next entry could
   1116  1.118     rmind 		 * disappear while we are iterating.
   1117   1.27       chs 		 */
   1118  1.118     rmind 		if ((nextaobj = LIST_NEXT(aobj, u_list)) != NULL) {
   1119  1.118     rmind 			uao_reference(&nextaobj->u_obj);
   1120   1.27       chs 		}
   1121   1.90        ad 		mutex_exit(&uao_list_lock);
   1122   1.27       chs 
   1123   1.27       chs 		/*
   1124  1.118     rmind 		 * Page in all pages in the swap slot range.
   1125   1.27       chs 		 */
   1126  1.135        ad 		rw_enter(aobj->u_obj.vmobjlock, RW_WRITER);
   1127  1.118     rmind 		rv = uao_pagein(aobj, startslot, endslot);
   1128  1.135        ad 		rw_exit(aobj->u_obj.vmobjlock);
   1129   1.46       chs 
    1130  1.118     rmind 		/* Drop the reference to the current object. */
   1131  1.118     rmind 		uao_detach(&aobj->u_obj);
   1132   1.27       chs 		if (rv) {
   1133  1.118     rmind 			if (nextaobj) {
   1134  1.118     rmind 				uao_detach(&nextaobj->u_obj);
   1135  1.118     rmind 			}
   1136   1.27       chs 			return rv;
   1137   1.27       chs 		}
   1138   1.27       chs 
   1139  1.118     rmind 		aobj = nextaobj;
   1140   1.90        ad 		mutex_enter(&uao_list_lock);
   1141  1.118     rmind 	} while (aobj);
   1142   1.27       chs 
   1143   1.90        ad 	mutex_exit(&uao_list_lock);
   1144   1.87   thorpej 	return false;
   1145   1.27       chs }
   1146   1.27       chs 
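                          /*
                           * Illustrative sketch (compiled out): how a swap-device removal
                           * path might use uao_swap_off() to drain a device's slot range.
                           * "dev_startslot" and "dev_endslot" are placeholders.
                           */
                          #if 0
                          	/* nothing may be locked; uao_swap_off() takes its own locks */
                          	if (uao_swap_off(dev_startslot, dev_endslot)) {
                          		/* pagein ran out of memory: abort the removal */
                          		return ENOMEM;
                          	}
                          #endif
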
   1147   1.27       chs /*
    1148   1.27       chs  * uao_pagein: page in any pages from aobj in the given swap-slot range.
   1149   1.27       chs  *
   1150   1.27       chs  * => aobj must be locked and is returned locked.
   1151   1.87   thorpej  * => returns true if pagein was aborted due to lack of memory.
   1152   1.27       chs  */
   1153   1.85   thorpej static bool
   1154   1.67   thorpej uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
   1155   1.27       chs {
   1156   1.85   thorpej 	bool rv;
   1157   1.27       chs 
   1158   1.27       chs 	if (UAO_USES_SWHASH(aobj)) {
   1159   1.27       chs 		struct uao_swhash_elt *elt;
   1160   1.65  christos 		int buck;
   1161   1.27       chs 
   1162   1.27       chs restart:
   1163   1.65  christos 		for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
   1164   1.65  christos 			for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
   1165   1.27       chs 			     elt != NULL;
   1166   1.27       chs 			     elt = LIST_NEXT(elt, list)) {
   1167   1.27       chs 				int i;
   1168   1.27       chs 
   1169   1.27       chs 				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
   1170   1.27       chs 					int slot = elt->slots[i];
   1171   1.27       chs 
   1172   1.27       chs 					/*
   1173   1.27       chs 					 * if the slot isn't in range, skip it.
   1174   1.27       chs 					 */
   1175   1.46       chs 
   1176   1.41       chs 					if (slot < startslot ||
   1177   1.27       chs 					    slot >= endslot) {
   1178   1.27       chs 						continue;
   1179   1.27       chs 					}
   1180   1.27       chs 
   1181   1.27       chs 					/*
    1182   1.27       chs 					 * process the page, then start over
    1183   1.27       chs 					 * on this object, since the swhash
    1184   1.27       chs 					 * elt may have been freed.
   1186   1.27       chs 					 */
   1187   1.46       chs 
   1188   1.27       chs 					rv = uao_pagein_page(aobj,
   1189   1.27       chs 					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
   1190   1.27       chs 					if (rv) {
   1191   1.27       chs 						return rv;
   1192   1.27       chs 					}
   1193   1.27       chs 					goto restart;
   1194   1.27       chs 				}
   1195   1.27       chs 			}
   1196   1.27       chs 		}
   1197   1.27       chs 	} else {
   1198   1.27       chs 		int i;
   1199   1.27       chs 
   1200   1.27       chs 		for (i = 0; i < aobj->u_pages; i++) {
   1201   1.27       chs 			int slot = aobj->u_swslots[i];
   1202   1.27       chs 
   1203   1.27       chs 			/*
    1204   1.27       chs 			 * if the slot isn't in range, skip it.
   1205   1.27       chs 			 */
   1206   1.46       chs 
   1207   1.27       chs 			if (slot < startslot || slot >= endslot) {
   1208   1.27       chs 				continue;
   1209   1.27       chs 			}
   1210   1.27       chs 
   1211   1.27       chs 			/*
   1212   1.27       chs 			 * process the page.
   1213   1.27       chs 			 */
   1214   1.46       chs 
   1215   1.27       chs 			rv = uao_pagein_page(aobj, i);
   1216   1.27       chs 			if (rv) {
   1217   1.27       chs 				return rv;
   1218   1.27       chs 			}
   1219   1.27       chs 		}
   1220   1.27       chs 	}
   1221   1.27       chs 
   1222   1.87   thorpej 	return false;
   1223   1.27       chs }
   1224   1.27       chs 
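                          /*
                           * Illustrative sketch (compiled out) of the restart pattern used
                           * in uao_pagein() above: processing an entry may free it and
                           * reshape the hash chain, so rather than trusting a saved "next"
                           * pointer, the scan starts over from the top after every page
                           * processed.  "head", "entry_in_range" and "process" are
                           * placeholders in this fragment.
                           */
                          #if 0
                          restart:
                          	LIST_FOREACH(elt, head, list) {
                          		if (entry_in_range(elt)) {
                          			process(elt);	/* may free elt */
                          			goto restart;	/* don't touch elt again */
                          		}
                          	}
                          #endif
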
   1225   1.27       chs /*
   1226  1.117     rmind  * uao_pagein_page: page in a single page from an anonymous UVM object.
   1227   1.27       chs  *
   1228  1.117     rmind  * => Returns true if pagein was aborted due to lack of memory.
   1229  1.117     rmind  * => Object must be locked and is returned locked.
   1230   1.27       chs  */
   1231   1.46       chs 
   1232   1.85   thorpej static bool
   1233   1.67   thorpej uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
   1234   1.27       chs {
   1235  1.117     rmind 	struct uvm_object *uobj = &aobj->u_obj;
   1236   1.27       chs 	struct vm_page *pg;
   1237   1.57        pk 	int rv, npages;
   1238   1.27       chs 
   1239   1.27       chs 	pg = NULL;
   1240   1.27       chs 	npages = 1;
   1241  1.117     rmind 
   1242  1.135        ad 	KASSERT(rw_write_held(uobj->vmobjlock));
   1243  1.128   msaitoh 	rv = uao_get(uobj, (voff_t)pageidx << PAGE_SHIFT, &pg, &npages,
   1244  1.117     rmind 	    0, VM_PROT_READ | VM_PROT_WRITE, 0, PGO_SYNCIO);
   1245   1.27       chs 
   1246   1.27       chs 	/*
   1247   1.27       chs 	 * relock and finish up.
   1248   1.27       chs 	 */
   1249   1.46       chs 
   1250  1.135        ad 	rw_enter(uobj->vmobjlock, RW_WRITER);
   1251   1.27       chs 	switch (rv) {
   1252   1.40       chs 	case 0:
   1253   1.27       chs 		break;
   1254   1.27       chs 
   1255   1.40       chs 	case EIO:
   1256   1.40       chs 	case ERESTART:
   1257   1.46       chs 
   1258   1.27       chs 		/*
   1259   1.27       chs 		 * nothing more to do on errors.
   1260   1.40       chs 		 * ERESTART can only mean that the anon was freed,
   1261   1.27       chs 		 * so again there's nothing to do.
   1262   1.27       chs 		 */
   1263   1.46       chs 
   1264   1.87   thorpej 		return false;
   1265   1.59        pk 
   1266   1.59        pk 	default:
   1267   1.87   thorpej 		return true;
   1268   1.27       chs 	}
   1269   1.27       chs 
   1270   1.27       chs 	/*
   1271   1.27       chs 	 * ok, we've got the page now.
   1272   1.27       chs 	 * mark it as dirty, clear its swslot and un-busy it.
   1273   1.27       chs 	 */
   1274   1.57        pk 	uao_dropswap(&aobj->u_obj, pageidx);
   1275   1.27       chs 
   1276   1.27       chs 	/*
   1277   1.80      yamt 	 * make sure it's on a page queue.
   1278   1.27       chs 	 */
   1279  1.133        ad 	uvm_pagelock(pg);
   1280  1.131        ad 	uvm_pageenqueue(pg);
   1281  1.138        ad 	uvm_pagewakeup(pg);
   1282  1.133        ad 	uvm_pageunlock(pg);
   1283   1.56      yamt 
   1284  1.138        ad 	pg->flags &= ~(PG_BUSY|PG_FAKE);
   1285  1.134        ad 	uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
   1286  1.138        ad 	UVM_PAGE_OWN(pg, NULL);
   1287   1.56      yamt 
   1288   1.87   thorpej 	return false;
   1289    1.1       mrg }
   1290   1.72      yamt 
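                          /*
                           * Illustrative sketch (compiled out): the state after a successful
                           * uao_pagein_page(aobj, pageidx).  The swap slot is gone and the
                           * page is resident and dirty, so its contents cannot be lost; the
                           * object is still write-locked at this point.
                           */
                          #if 0
                          	KASSERT(uao_find_swslot(uobj, pageidx) == 0);
                          	pg = uvm_pagelookup(uobj, (voff_t)pageidx << PAGE_SHIFT);
                          	KASSERT(pg != NULL);
                          	KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY);
                          #endif
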
   1291   1.75      yamt /*
    1292   1.75      yamt  * uao_dropswap_range: drop swap slots in the given range.
   1293   1.75      yamt  *
   1294   1.75      yamt  * => aobj must be locked and is returned locked.
   1295   1.75      yamt  * => start is inclusive.  end is exclusive.
   1296   1.75      yamt  */
   1297   1.75      yamt 
   1298   1.75      yamt void
   1299   1.75      yamt uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
   1300   1.75      yamt {
   1301   1.75      yamt 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
   1302  1.117     rmind 	int swpgonlydelta = 0;
   1303   1.75      yamt 
   1304  1.141        ad 	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
   1305  1.135        ad 	KASSERT(rw_write_held(uobj->vmobjlock));
   1306   1.75      yamt 
   1307   1.75      yamt 	if (end == 0) {
   1308   1.75      yamt 		end = INT64_MAX;
   1309   1.75      yamt 	}
   1310   1.75      yamt 
   1311   1.75      yamt 	if (UAO_USES_SWHASH(aobj)) {
   1312   1.75      yamt 		int i, hashbuckets = aobj->u_swhashmask + 1;
   1313   1.75      yamt 		voff_t taghi;
   1314   1.75      yamt 		voff_t taglo;
   1315   1.75      yamt 
   1316   1.75      yamt 		taglo = UAO_SWHASH_ELT_TAG(start);
   1317   1.75      yamt 		taghi = UAO_SWHASH_ELT_TAG(end);
   1318   1.75      yamt 
   1319   1.75      yamt 		for (i = 0; i < hashbuckets; i++) {
   1320   1.75      yamt 			struct uao_swhash_elt *elt, *next;
   1321   1.75      yamt 
   1322   1.75      yamt 			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
   1323   1.75      yamt 			     elt != NULL;
   1324   1.75      yamt 			     elt = next) {
   1325   1.75      yamt 				int startidx, endidx;
   1326   1.75      yamt 				int j;
   1327   1.75      yamt 
   1328   1.75      yamt 				next = LIST_NEXT(elt, list);
   1329   1.75      yamt 
   1330   1.75      yamt 				if (elt->tag < taglo || taghi < elt->tag) {
   1331   1.75      yamt 					continue;
   1332   1.75      yamt 				}
   1333   1.75      yamt 
   1334   1.75      yamt 				if (elt->tag == taglo) {
   1335   1.75      yamt 					startidx =
   1336   1.75      yamt 					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
   1337   1.75      yamt 				} else {
   1338   1.75      yamt 					startidx = 0;
   1339   1.75      yamt 				}
   1340   1.75      yamt 
   1341   1.75      yamt 				if (elt->tag == taghi) {
   1342   1.75      yamt 					endidx =
   1343   1.75      yamt 					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
   1344   1.75      yamt 				} else {
   1345   1.75      yamt 					endidx = UAO_SWHASH_CLUSTER_SIZE;
   1346   1.75      yamt 				}
   1347   1.75      yamt 
   1348   1.75      yamt 				for (j = startidx; j < endidx; j++) {
   1349   1.75      yamt 					int slot = elt->slots[j];
   1350   1.75      yamt 
   1351   1.75      yamt 					KASSERT(uvm_pagelookup(&aobj->u_obj,
   1352   1.75      yamt 					    (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
   1353   1.75      yamt 					    + j) << PAGE_SHIFT) == NULL);
   1354   1.75      yamt 					if (slot > 0) {
   1355   1.75      yamt 						uvm_swap_free(slot, 1);
   1356   1.75      yamt 						swpgonlydelta++;
   1357   1.75      yamt 						KASSERT(elt->count > 0);
   1358   1.75      yamt 						elt->slots[j] = 0;
   1359   1.75      yamt 						elt->count--;
   1360   1.75      yamt 					}
   1361   1.75      yamt 				}
   1362   1.75      yamt 
   1363   1.75      yamt 				if (elt->count == 0) {
   1364   1.75      yamt 					LIST_REMOVE(elt, list);
   1365   1.75      yamt 					pool_put(&uao_swhash_elt_pool, elt);
   1366   1.75      yamt 				}
   1367   1.75      yamt 			}
   1368   1.75      yamt 		}
   1369   1.75      yamt 	} else {
   1370   1.75      yamt 		int i;
   1371   1.75      yamt 
   1372   1.75      yamt 		if (aobj->u_pages < end) {
   1373   1.75      yamt 			end = aobj->u_pages;
   1374   1.75      yamt 		}
   1375   1.75      yamt 		for (i = start; i < end; i++) {
   1376   1.75      yamt 			int slot = aobj->u_swslots[i];
   1377   1.75      yamt 
   1378   1.75      yamt 			if (slot > 0) {
   1379   1.75      yamt 				uvm_swap_free(slot, 1);
   1380   1.75      yamt 				swpgonlydelta++;
   1381   1.75      yamt 			}
   1382   1.75      yamt 		}
   1383   1.75      yamt 	}
   1384   1.75      yamt 
   1385   1.75      yamt 	/*
    1386   1.75      yamt 	 * adjust the counter of pages that are only in swap
    1387   1.75      yamt 	 * (swpgonly) to account for the swap slots we've freed.
   1388   1.75      yamt 	 */
   1389   1.75      yamt 
   1390   1.75      yamt 	if (swpgonlydelta > 0) {
   1391   1.75      yamt 		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
   1392  1.129        ad 		atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
   1393   1.75      yamt 	}
   1394   1.75      yamt }
   1395   1.75      yamt 
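                          /*
                           * Illustrative sketch (compiled out): a truncation-style caller
                           * shrinking an aobj to "newsize" bytes could free the swap slots
                           * beyond the new end like this.  The resident pages in the range
                           * are assumed to have been freed already (the function asserts as
                           * much); "newsize" is a placeholder.
                           */
                          #if 0
                          	rw_enter(uobj->vmobjlock, RW_WRITER);
                          	/* page indices; end == 0 means "to the end of the object" */
                          	uao_dropswap_range(uobj, atop(newsize), 0);
                          	rw_exit(uobj->vmobjlock);
                          #endif
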
   1396   1.72      yamt #endif /* defined(VMSWAP) */
   1397