uvm_aobj.c revision 1.103
      1  1.103        ad /*	$NetBSD: uvm_aobj.c,v 1.103 2008/06/25 13:21:04 ad Exp $	*/
      2    1.6       mrg 
      3    1.7       chs /*
      4    1.7       chs  * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
      5    1.7       chs  *                    Washington University.
      6    1.7       chs  * All rights reserved.
      7    1.7       chs  *
      8    1.7       chs  * Redistribution and use in source and binary forms, with or without
      9    1.7       chs  * modification, are permitted provided that the following conditions
     10    1.7       chs  * are met:
     11    1.7       chs  * 1. Redistributions of source code must retain the above copyright
     12    1.7       chs  *    notice, this list of conditions and the following disclaimer.
     13    1.7       chs  * 2. Redistributions in binary form must reproduce the above copyright
     14    1.7       chs  *    notice, this list of conditions and the following disclaimer in the
     15    1.7       chs  *    documentation and/or other materials provided with the distribution.
     16    1.7       chs  * 3. All advertising materials mentioning features or use of this software
     17    1.7       chs  *    must display the following acknowledgement:
     18    1.7       chs  *      This product includes software developed by Charles D. Cranor and
     19    1.7       chs  *      Washington University.
     20    1.7       chs  * 4. The name of the author may not be used to endorse or promote products
     21    1.7       chs  *    derived from this software without specific prior written permission.
     22    1.7       chs  *
     23    1.7       chs  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     24    1.7       chs  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     25    1.7       chs  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     26    1.7       chs  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     27    1.7       chs  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     28    1.7       chs  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     29    1.7       chs  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     30    1.7       chs  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     31    1.7       chs  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     32    1.7       chs  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     33    1.7       chs  *
     34    1.4       mrg  * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
     35    1.4       mrg  */
     36    1.7       chs /*
     37    1.7       chs  * uvm_aobj.c: anonymous memory uvm_object pager
     38    1.7       chs  *
      39    1.7       chs  * author: Chuck Silvers <chuq@chuq.com>
     40    1.7       chs  * started: Jan-1998
     41    1.7       chs  *
     42    1.7       chs  * - design mostly from Chuck Cranor
     43    1.7       chs  */
     44   1.49     lukem 
     45   1.49     lukem #include <sys/cdefs.h>
     46  1.103        ad __KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.103 2008/06/25 13:21:04 ad Exp $");
     47    1.7       chs 
     48    1.7       chs #include "opt_uvmhist.h"
     49    1.1       mrg 
     50    1.1       mrg #include <sys/param.h>
     51    1.1       mrg #include <sys/systm.h>
     52    1.1       mrg #include <sys/proc.h>
     53    1.1       mrg #include <sys/malloc.h>
     54   1.37       chs #include <sys/kernel.h>
     55   1.12   thorpej #include <sys/pool.h>
     56    1.1       mrg 
     57    1.1       mrg #include <uvm/uvm.h>
     58    1.1       mrg 
     59    1.1       mrg /*
     60    1.1       mrg  * an aobj manages anonymous-memory backed uvm_objects.   in addition
     61    1.1       mrg  * to keeping the list of resident pages, it also keeps a list of
     62    1.1       mrg  * allocated swap blocks.  depending on the size of the aobj this list
     63    1.1       mrg  * of allocated swap blocks is either stored in an array (small objects)
     64    1.1       mrg  * or in a hash table (large objects).
     65    1.1       mrg  */
     66    1.1       mrg 
     67    1.1       mrg /*
     68    1.1       mrg  * local structures
     69    1.1       mrg  */
     70    1.1       mrg 
     71    1.1       mrg /*
     72    1.1       mrg  * for hash tables, we break the address space of the aobj into blocks
     73    1.1       mrg  * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
     74    1.1       mrg  * be a power of two.
     75    1.1       mrg  */
     76    1.1       mrg 
     77    1.1       mrg #define UAO_SWHASH_CLUSTER_SHIFT 4
     78    1.1       mrg #define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)
     79    1.1       mrg 
     80    1.1       mrg /* get the "tag" for this page index */
     81    1.1       mrg #define UAO_SWHASH_ELT_TAG(PAGEIDX) \
     82    1.1       mrg 	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)
     83    1.1       mrg 
     84   1.75      yamt #define UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX) \
     85   1.75      yamt 	((PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1))
     86   1.75      yamt 
     87    1.1       mrg /* given an ELT and a page index, find the swap slot */
     88    1.1       mrg #define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
     89   1.75      yamt 	((ELT)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX)])
     90    1.1       mrg 
     91    1.1       mrg /* given an ELT, return its pageidx base */
     92    1.1       mrg #define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
     93    1.1       mrg 	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)
     94    1.1       mrg 
     95    1.1       mrg /*
     96    1.1       mrg  * the swhash hash function
     97    1.1       mrg  */
     98   1.46       chs 
     99    1.1       mrg #define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
    100    1.1       mrg 	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
    101    1.1       mrg 			    & (AOBJ)->u_swhashmask)])
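/*
 * a worked example (illustrative; not part of the original file): with
 * UAO_SWHASH_CLUSTER_SHIFT == 4, page index 0x123 decomposes as
 *
 *	tag    = 0x123 >> 4  = 0x12	(which cluster the page is in)
 *	slot   = 0x123 & 0xf = 0x3	(index into that cluster's slots[])
 *	bucket = 0x12 & u_swhashmask	(which hash chain to search)
 *
 * so pages 0x120 through 0x12f all share a single uao_swhash_elt.
 */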
    102    1.1       mrg 
    103    1.1       mrg /*
     104    1.1       mrg  * the swhash threshold determines if we will use an array or a
    105    1.1       mrg  * hash table to store the list of allocated swap blocks.
    106    1.1       mrg  */
    107    1.1       mrg 
    108    1.1       mrg #define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
    109    1.1       mrg #define UAO_USES_SWHASH(AOBJ) \
    110    1.1       mrg 	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */
    111    1.1       mrg 
    112    1.1       mrg /*
    113    1.3       chs  * the number of buckets in a swhash, with an upper bound
    114    1.1       mrg  */
    115   1.46       chs 
    116    1.1       mrg #define UAO_SWHASH_MAXBUCKETS 256
    117    1.1       mrg #define UAO_SWHASH_BUCKETS(AOBJ) \
    118   1.46       chs 	(MIN((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
    119    1.1       mrg 	     UAO_SWHASH_MAXBUCKETS))
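/*
 * illustrative arithmetic (not part of the original file): with the
 * definitions above, UAO_SWHASH_THRESHOLD is 16 * 4 = 64 pages, so an
 * aobj larger than 256kB (with 4kB pages) switches from the plain
 * array to the hash table, and UAO_SWHASH_BUCKETS() caps the table at
 * 256 buckets no matter how large the object grows.
 */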
    120    1.1       mrg 
    121    1.1       mrg 
    122    1.1       mrg /*
    123    1.1       mrg  * uao_swhash_elt: when a hash table is being used, this structure defines
    124    1.1       mrg  * the format of an entry in the bucket list.
    125    1.1       mrg  */
    126    1.1       mrg 
    127    1.1       mrg struct uao_swhash_elt {
    128    1.5       mrg 	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
    129   1.28    kleink 	voff_t tag;				/* our 'tag' */
    130    1.5       mrg 	int count;				/* our number of active slots */
    131    1.5       mrg 	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
    132    1.1       mrg };
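/*
 * size sketch (an estimate, not from the original file): on an LP64
 * machine an elt is roughly 16 bytes of list linkage, 8 bytes of tag,
 * 4 bytes of count plus padding, and 16 * 4 = 64 bytes of slots --
 * on the order of 96 bytes to track the swap state of a 16-page
 * cluster.
 */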
    133    1.1       mrg 
    134    1.1       mrg /*
    135    1.1       mrg  * uao_swhash: the swap hash table structure
    136    1.1       mrg  */
    137    1.1       mrg 
    138    1.1       mrg LIST_HEAD(uao_swhash, uao_swhash_elt);
    139    1.1       mrg 
    140   1.12   thorpej /*
    141   1.12   thorpej  * uao_swhash_elt_pool: pool of uao_swhash_elt structures
    142   1.64    simonb  * NOTE: Pages for this pool must not come from a pageable kernel map!
    143   1.12   thorpej  */
    144   1.64    simonb POOL_INIT(uao_swhash_elt_pool, sizeof(struct uao_swhash_elt), 0, 0, 0,
    145   1.88        ad     "uaoeltpl", NULL, IPL_VM);
    146    1.1       mrg 
    147  1.103        ad static struct pool_cache uvm_aobj_cache;
    148  1.103        ad 
    149    1.1       mrg /*
    150    1.1       mrg  * uvm_aobj: the actual anon-backed uvm_object
    151    1.1       mrg  *
     152    1.1       mrg  * => the uvm_object is at the top of the structure, which allows
    153   1.46       chs  *   (struct uvm_aobj *) == (struct uvm_object *)
    154    1.1       mrg  * => only one of u_swslots and u_swhash is used in any given aobj
    155    1.1       mrg  */
    156    1.1       mrg 
    157    1.1       mrg struct uvm_aobj {
    158    1.5       mrg 	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
    159   1.79    cherry 	pgoff_t u_pages;	 /* number of pages in entire object */
    160    1.5       mrg 	int u_flags;		 /* the flags (see uvm_aobj.h) */
    161    1.5       mrg 	int *u_swslots;		 /* array of offset->swapslot mappings */
    162    1.5       mrg 				 /*
    163    1.5       mrg 				  * hashtable of offset->swapslot mappings
    164    1.5       mrg 				  * (u_swhash is an array of bucket heads)
    165    1.5       mrg 				  */
    166    1.5       mrg 	struct uao_swhash *u_swhash;
    167    1.5       mrg 	u_long u_swhashmask;		/* mask for hashtable */
    168    1.5       mrg 	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
    169    1.1       mrg };
    170    1.1       mrg 
    171    1.1       mrg /*
     172   1.12   thorpej  * M_UVMAOBJ: malloc type for the aobj hash tables and slot arrays
    173   1.12   thorpej  */
    174   1.54   thorpej MALLOC_DEFINE(M_UVMAOBJ, "UVM aobj", "UVM aobj and related structures");
    175   1.12   thorpej 
    176   1.12   thorpej /*
    177    1.1       mrg  * local functions
    178    1.1       mrg  */
    179    1.1       mrg 
    180   1.62  junyoung static void	uao_free(struct uvm_aobj *);
    181   1.62  junyoung static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
    182   1.62  junyoung 		    int *, int, vm_prot_t, int, int);
    183   1.86      matt static int	uao_put(struct uvm_object *, voff_t, voff_t, int);
    184   1.72      yamt 
    185   1.72      yamt #if defined(VMSWAP)
    186   1.72      yamt static struct uao_swhash_elt *uao_find_swhash_elt
    187   1.85   thorpej     (struct uvm_aobj *, int, bool);
    188   1.72      yamt 
    189   1.85   thorpej static bool uao_pagein(struct uvm_aobj *, int, int);
    190   1.85   thorpej static bool uao_pagein_page(struct uvm_aobj *, int);
    191   1.75      yamt static void uao_dropswap_range1(struct uvm_aobj *, voff_t, voff_t);
    192   1.72      yamt #endif /* defined(VMSWAP) */
    193    1.1       mrg 
    194    1.1       mrg /*
    195    1.1       mrg  * aobj_pager
    196   1.41       chs  *
    197    1.1       mrg  * note that some functions (e.g. put) are handled elsewhere
    198    1.1       mrg  */
    199    1.1       mrg 
    200   1.95      yamt const struct uvm_pagerops aobj_pager = {
    201   1.94      yamt 	.pgo_reference = uao_reference,
    202   1.94      yamt 	.pgo_detach = uao_detach,
    203   1.94      yamt 	.pgo_get = uao_get,
    204   1.94      yamt 	.pgo_put = uao_put,
    205    1.1       mrg };
    206    1.1       mrg 
    207    1.1       mrg /*
    208    1.1       mrg  * uao_list: global list of active aobjs, locked by uao_list_lock
    209    1.1       mrg  */
    210    1.1       mrg 
    211    1.1       mrg static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
    212   1.90        ad static kmutex_t uao_list_lock;
    213    1.1       mrg 
    214    1.1       mrg /*
    215    1.1       mrg  * functions
    216    1.1       mrg  */
    217    1.1       mrg 
    218    1.1       mrg /*
    219    1.1       mrg  * hash table/array related functions
    220    1.1       mrg  */
    221    1.1       mrg 
    222   1.72      yamt #if defined(VMSWAP)
    223   1.72      yamt 
    224    1.1       mrg /*
    225  1.100        ad  * uao_hashinit: limited version of hashinit() that uses malloc(). XXX
    226  1.100        ad  */
    227  1.100        ad static void *
    228  1.100        ad uao_hashinit(u_int elements, int mflags, u_long *hashmask)
    229  1.100        ad {
    230  1.100        ad 	LIST_HEAD(, generic) *elm, *emx;
    231  1.100        ad 	u_long hashsize;
    232  1.100        ad 	void *p;
    233  1.100        ad 
    234  1.100        ad 	for (hashsize = 1; hashsize < elements; hashsize <<= 1)
    235  1.100        ad 		continue;
    236  1.100        ad 	if ((p = malloc(hashsize * sizeof(*elm), M_UVMAOBJ, mflags)) == NULL)
    237  1.100        ad 		return (NULL);
    238  1.100        ad 	for (elm = p, emx = elm + hashsize; elm < emx; elm++)
    239  1.100        ad 		LIST_INIT(elm);
    240  1.100        ad 	*hashmask = hashsize - 1;
    241  1.100        ad 
    242  1.100        ad 	return (p);
    243  1.100        ad }
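/*
 * behaviour sketch (illustrative, not part of the original file):
 * uao_hashinit() rounds the element count up to the next power of two
 * so that UAO_SWHASH_HASH() can pick a bucket with a cheap bitwise
 * AND of the returned mask instead of a modulo.
 */
#if 0	/* example only */
	u_long mask;
	void *table = uao_hashinit(20, M_WAITOK, &mask);
	/* hashsize rounds 20 up to 32, so mask == 31 (0x1f) */
#endif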
    244  1.100        ad 
    245  1.100        ad /*
    246    1.1       mrg  * uao_find_swhash_elt: find (or create) a hash table entry for a page
    247    1.1       mrg  * offset.
    248    1.1       mrg  *
    249    1.1       mrg  * => the object should be locked by the caller
    250    1.1       mrg  */
    251    1.1       mrg 
    252    1.5       mrg static struct uao_swhash_elt *
    253   1.85   thorpej uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
    254    1.5       mrg {
    255    1.5       mrg 	struct uao_swhash *swhash;
    256    1.5       mrg 	struct uao_swhash_elt *elt;
    257   1.28    kleink 	voff_t page_tag;
    258    1.1       mrg 
    259   1.45       chs 	swhash = UAO_SWHASH_HASH(aobj, pageidx);
    260   1.45       chs 	page_tag = UAO_SWHASH_ELT_TAG(pageidx);
    261    1.1       mrg 
    262    1.5       mrg 	/*
    263    1.5       mrg 	 * now search the bucket for the requested tag
    264    1.5       mrg 	 */
    265   1.45       chs 
    266   1.37       chs 	LIST_FOREACH(elt, swhash, list) {
    267   1.45       chs 		if (elt->tag == page_tag) {
    268   1.45       chs 			return elt;
    269   1.45       chs 		}
    270    1.5       mrg 	}
    271   1.45       chs 	if (!create) {
    272    1.5       mrg 		return NULL;
    273   1.45       chs 	}
    274    1.5       mrg 
    275    1.5       mrg 	/*
    276   1.12   thorpej 	 * allocate a new entry for the bucket and init/insert it in
    277    1.5       mrg 	 */
    278   1.45       chs 
    279   1.45       chs 	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
    280   1.45       chs 	if (elt == NULL) {
    281   1.45       chs 		return NULL;
    282   1.45       chs 	}
    283    1.5       mrg 	LIST_INSERT_HEAD(swhash, elt, list);
    284    1.5       mrg 	elt->tag = page_tag;
    285    1.5       mrg 	elt->count = 0;
    286    1.9     perry 	memset(elt->slots, 0, sizeof(elt->slots));
    287   1.45       chs 	return elt;
    288    1.1       mrg }
    289    1.1       mrg 
    290    1.1       mrg /*
    291    1.1       mrg  * uao_find_swslot: find the swap slot number for an aobj/pageidx
    292    1.1       mrg  *
    293   1.41       chs  * => object must be locked by caller
    294    1.1       mrg  */
    295   1.46       chs 
    296   1.46       chs int
    297   1.67   thorpej uao_find_swslot(struct uvm_object *uobj, int pageidx)
    298    1.1       mrg {
    299   1.46       chs 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    300   1.46       chs 	struct uao_swhash_elt *elt;
    301    1.1       mrg 
    302    1.5       mrg 	/*
    303    1.5       mrg 	 * if noswap flag is set, then we never return a slot
    304    1.5       mrg 	 */
    305    1.1       mrg 
    306    1.5       mrg 	if (aobj->u_flags & UAO_FLAG_NOSWAP)
    307    1.5       mrg 		return(0);
    308    1.1       mrg 
    309    1.5       mrg 	/*
    310    1.5       mrg 	 * if hashing, look in hash table.
    311    1.5       mrg 	 */
    312    1.1       mrg 
    313    1.5       mrg 	if (UAO_USES_SWHASH(aobj)) {
    314   1.87   thorpej 		elt = uao_find_swhash_elt(aobj, pageidx, false);
    315    1.5       mrg 		if (elt)
    316    1.5       mrg 			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
    317    1.5       mrg 		else
    318   1.31   thorpej 			return(0);
    319    1.5       mrg 	}
    320    1.1       mrg 
    321   1.41       chs 	/*
    322    1.5       mrg 	 * otherwise, look in the array
    323    1.5       mrg 	 */
    324   1.46       chs 
    325    1.5       mrg 	return(aobj->u_swslots[pageidx]);
    326    1.1       mrg }
    327    1.1       mrg 
    328    1.1       mrg /*
    329    1.1       mrg  * uao_set_swslot: set the swap slot for a page in an aobj.
    330    1.1       mrg  *
    331    1.1       mrg  * => setting a slot to zero frees the slot
    332    1.1       mrg  * => object must be locked by caller
    333   1.45       chs  * => we return the old slot number, or -1 if we failed to allocate
    334   1.45       chs  *    memory to record the new slot number
    335    1.1       mrg  */
    336   1.46       chs 
    337    1.5       mrg int
    338   1.67   thorpej uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
    339    1.5       mrg {
    340    1.5       mrg 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    341   1.45       chs 	struct uao_swhash_elt *elt;
    342    1.5       mrg 	int oldslot;
    343    1.5       mrg 	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
    344    1.5       mrg 	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
    345    1.5       mrg 	    aobj, pageidx, slot, 0);
    346    1.1       mrg 
    347    1.5       mrg 	/*
    348   1.46       chs 	 * if noswap flag is set, then we can't set a non-zero slot.
    349    1.5       mrg 	 */
    350    1.1       mrg 
    351    1.5       mrg 	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
    352    1.5       mrg 		if (slot == 0)
    353   1.46       chs 			return(0);
    354    1.1       mrg 
    355    1.5       mrg 		printf("uao_set_swslot: uobj = %p\n", uobj);
    356   1.46       chs 		panic("uao_set_swslot: NOSWAP object");
    357    1.5       mrg 	}
    358    1.1       mrg 
    359    1.5       mrg 	/*
    360    1.5       mrg 	 * are we using a hash table?  if so, add it in the hash.
    361    1.5       mrg 	 */
    362    1.1       mrg 
    363    1.5       mrg 	if (UAO_USES_SWHASH(aobj)) {
    364   1.39       chs 
    365   1.12   thorpej 		/*
    366   1.12   thorpej 		 * Avoid allocating an entry just to free it again if
     367   1.12   thorpej 		 * the page had no swap slot in the first place, and
    368   1.12   thorpej 		 * we are freeing.
    369   1.12   thorpej 		 */
    370   1.39       chs 
    371   1.46       chs 		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
    372   1.12   thorpej 		if (elt == NULL) {
    373   1.45       chs 			return slot ? -1 : 0;
    374   1.12   thorpej 		}
    375    1.5       mrg 
    376    1.5       mrg 		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
    377    1.5       mrg 		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;
    378    1.5       mrg 
    379    1.5       mrg 		/*
    380    1.5       mrg 		 * now adjust the elt's reference counter and free it if we've
    381    1.5       mrg 		 * dropped it to zero.
    382    1.5       mrg 		 */
    383    1.5       mrg 
    384    1.5       mrg 		if (slot) {
    385    1.5       mrg 			if (oldslot == 0)
    386    1.5       mrg 				elt->count++;
    387   1.45       chs 		} else {
    388   1.45       chs 			if (oldslot)
    389    1.5       mrg 				elt->count--;
    390    1.5       mrg 
    391    1.5       mrg 			if (elt->count == 0) {
    392    1.5       mrg 				LIST_REMOVE(elt, list);
    393   1.12   thorpej 				pool_put(&uao_swhash_elt_pool, elt);
    394    1.5       mrg 			}
    395    1.5       mrg 		}
    396   1.41       chs 	} else {
    397    1.5       mrg 		/* we are using an array */
    398    1.5       mrg 		oldslot = aobj->u_swslots[pageidx];
    399    1.5       mrg 		aobj->u_swslots[pageidx] = slot;
    400    1.5       mrg 	}
    401    1.5       mrg 	return (oldslot);
    402    1.1       mrg }
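/*
 * typical caller pattern (a sketch, not from the original file; uobj
 * and pageidx are assumed to be in scope): this is essentially what
 * uao_dropswap() does -- clear the slot mapping and, if a slot had
 * been allocated, hand it back to the swap allocator.
 */
#if 0	/* example only */
	int slot = uao_set_swslot(uobj, pageidx, 0);
	if (slot > 0) {
		uvm_swap_free(slot, 1);
	}
#endif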
    403    1.1       mrg 
    404   1.72      yamt #endif /* defined(VMSWAP) */
    405   1.72      yamt 
    406    1.1       mrg /*
    407    1.1       mrg  * end of hash/array functions
    408    1.1       mrg  */
    409    1.1       mrg 
    410    1.1       mrg /*
    411    1.1       mrg  * uao_free: free all resources held by an aobj, and then free the aobj
    412    1.1       mrg  *
    413    1.1       mrg  * => the aobj should be dead
    414    1.1       mrg  */
    415   1.46       chs 
    416    1.1       mrg static void
    417   1.67   thorpej uao_free(struct uvm_aobj *aobj)
    418    1.1       mrg {
    419   1.46       chs 	int swpgonlydelta = 0;
    420    1.1       mrg 
    421   1.96        ad 
    422   1.93     pooka #if defined(VMSWAP)
    423   1.93     pooka 	uao_dropswap_range1(aobj, 0, 0);
    424   1.93     pooka #endif /* defined(VMSWAP) */
    425   1.93     pooka 
    426   1.96        ad 	mutex_exit(&aobj->u_obj.vmobjlock);
    427   1.72      yamt 
    428   1.72      yamt #if defined(VMSWAP)
    429    1.5       mrg 	if (UAO_USES_SWHASH(aobj)) {
    430    1.1       mrg 
    431    1.5       mrg 		/*
    432   1.75      yamt 		 * free the hash table itself.
    433    1.5       mrg 		 */
    434   1.46       chs 
    435   1.34   thorpej 		free(aobj->u_swhash, M_UVMAOBJ);
    436    1.5       mrg 	} else {
    437    1.5       mrg 
    438    1.5       mrg 		/*
     439   1.75      yamt 		 * free the array itself.
    440    1.5       mrg 		 */
    441    1.5       mrg 
    442   1.34   thorpej 		free(aobj->u_swslots, M_UVMAOBJ);
    443    1.1       mrg 	}
    444   1.72      yamt #endif /* defined(VMSWAP) */
    445   1.72      yamt 
    446    1.5       mrg 	/*
    447    1.5       mrg 	 * finally free the aobj itself
    448    1.5       mrg 	 */
    449   1.46       chs 
    450   1.96        ad 	UVM_OBJ_DESTROY(&aobj->u_obj);
    451  1.103        ad 	pool_cache_put(&uvm_aobj_cache, aobj);
    452   1.46       chs 
    453   1.46       chs 	/*
    454   1.46       chs 	 * adjust the counter of pages only in swap for all
    455   1.46       chs 	 * the swap slots we've freed.
    456   1.46       chs 	 */
    457   1.46       chs 
    458   1.48       chs 	if (swpgonlydelta > 0) {
    459   1.90        ad 		mutex_enter(&uvm_swap_data_lock);
    460   1.48       chs 		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
    461   1.48       chs 		uvmexp.swpgonly -= swpgonlydelta;
    462   1.90        ad 		mutex_exit(&uvm_swap_data_lock);
    463   1.48       chs 	}
    464    1.1       mrg }
    465    1.1       mrg 
    466    1.1       mrg /*
    467    1.1       mrg  * pager functions
    468    1.1       mrg  */
    469    1.1       mrg 
    470    1.1       mrg /*
    471    1.1       mrg  * uao_create: create an aobj of the given size and return its uvm_object.
    472    1.1       mrg  *
    473    1.1       mrg  * => for normal use, flags are always zero
    474    1.1       mrg  * => for the kernel object, the flags are:
    475    1.1       mrg  *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
    476    1.1       mrg  *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
    477    1.1       mrg  */
    478   1.46       chs 
    479    1.5       mrg struct uvm_object *
    480   1.67   thorpej uao_create(vsize_t size, int flags)
    481    1.5       mrg {
    482   1.46       chs 	static struct uvm_aobj kernel_object_store;
    483   1.46       chs 	static int kobj_alloced = 0;
    484   1.79    cherry 	pgoff_t pages = round_page(size) >> PAGE_SHIFT;
    485    1.5       mrg 	struct uvm_aobj *aobj;
    486   1.66      yamt 	int refs;
    487    1.1       mrg 
    488    1.5       mrg 	/*
      489   1.27       chs 	 * allocate a new aobj unless we are asked for the kernel object
    490   1.27       chs 	 */
    491    1.5       mrg 
    492   1.46       chs 	if (flags & UAO_FLAG_KERNOBJ) {
    493   1.46       chs 		KASSERT(!kobj_alloced);
    494    1.5       mrg 		aobj = &kernel_object_store;
    495    1.5       mrg 		aobj->u_pages = pages;
    496   1.46       chs 		aobj->u_flags = UAO_FLAG_NOSWAP;
    497   1.66      yamt 		refs = UVM_OBJ_KERN;
    498    1.5       mrg 		kobj_alloced = UAO_FLAG_KERNOBJ;
    499    1.5       mrg 	} else if (flags & UAO_FLAG_KERNSWAP) {
    500   1.46       chs 		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
    501    1.5       mrg 		aobj = &kernel_object_store;
    502    1.5       mrg 		kobj_alloced = UAO_FLAG_KERNSWAP;
    503   1.66      yamt 		refs = 0xdeadbeaf; /* XXX: gcc */
    504   1.46       chs 	} else {
    505  1.103        ad 		aobj = pool_cache_get(&uvm_aobj_cache, PR_WAITOK);
    506    1.5       mrg 		aobj->u_pages = pages;
    507   1.46       chs 		aobj->u_flags = 0;
    508   1.66      yamt 		refs = 1;
    509    1.5       mrg 	}
    510    1.1       mrg 
    511    1.5       mrg 	/*
    512    1.5       mrg  	 * allocate hash/array if necessary
    513    1.5       mrg  	 *
     514    1.5       mrg  	 * note: in the KERNSWAP case no locking is needed, since we are
     515    1.5       mrg  	 * still booting and should be the only thread around.
    516    1.5       mrg  	 */
    517   1.46       chs 
    518    1.5       mrg 	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
    519   1.72      yamt #if defined(VMSWAP)
    520    1.5       mrg 		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
    521    1.5       mrg 		    M_NOWAIT : M_WAITOK;
    522    1.5       mrg 
    523    1.5       mrg 		/* allocate hash table or array depending on object size */
    524   1.27       chs 		if (UAO_USES_SWHASH(aobj)) {
    525  1.100        ad 			aobj->u_swhash = uao_hashinit(UAO_SWHASH_BUCKETS(aobj),
    526  1.100        ad 			    mflags, &aobj->u_swhashmask);
    527    1.5       mrg 			if (aobj->u_swhash == NULL)
    528    1.5       mrg 				panic("uao_create: hashinit swhash failed");
    529    1.5       mrg 		} else {
    530   1.34   thorpej 			aobj->u_swslots = malloc(pages * sizeof(int),
    531    1.5       mrg 			    M_UVMAOBJ, mflags);
    532    1.5       mrg 			if (aobj->u_swslots == NULL)
    533    1.5       mrg 				panic("uao_create: malloc swslots failed");
    534    1.9     perry 			memset(aobj->u_swslots, 0, pages * sizeof(int));
    535    1.5       mrg 		}
    536   1.72      yamt #endif /* defined(VMSWAP) */
    537    1.5       mrg 
    538    1.5       mrg 		if (flags) {
    539    1.5       mrg 			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
    540    1.5       mrg 			return(&aobj->u_obj);
    541    1.5       mrg 		}
    542    1.5       mrg 	}
    543    1.5       mrg 
    544    1.5       mrg 	/*
    545    1.5       mrg  	 * init aobj fields
    546    1.5       mrg  	 */
    547   1.46       chs 
    548   1.66      yamt 	UVM_OBJ_INIT(&aobj->u_obj, &aobj_pager, refs);
    549    1.1       mrg 
    550    1.5       mrg 	/*
    551    1.5       mrg  	 * now that aobj is ready, add it to the global list
    552    1.5       mrg  	 */
    553   1.46       chs 
    554   1.90        ad 	mutex_enter(&uao_list_lock);
    555    1.5       mrg 	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
    556   1.90        ad 	mutex_exit(&uao_list_lock);
    557    1.5       mrg 	return(&aobj->u_obj);
    558    1.1       mrg }
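/*
 * usage sketch (illustrative, not part of the original file): normal
 * callers pass flags of zero; the kernel object alone is created in
 * two steps, UAO_FLAG_KERNOBJ at boot and UAO_FLAG_KERNSWAP once the
 * swap subsystem is up, which allocates the swap bookkeeping and
 * clears UAO_FLAG_NOSWAP.
 */
#if 0	/* example only */
	struct uvm_object *uobj = uao_create(16 * PAGE_SIZE, 0);
	/* ... map it and fault pages in ... */
	uao_detach(uobj);	/* drop the initial reference */
#endif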
    559    1.1       mrg 
    560    1.1       mrg 
    561    1.1       mrg 
    562    1.1       mrg /*
    563    1.1       mrg  * uao_init: set up aobj pager subsystem
    564    1.1       mrg  *
    565    1.1       mrg  * => called at boot time from uvm_pager_init()
    566    1.1       mrg  */
    567   1.46       chs 
    568   1.27       chs void
    569   1.46       chs uao_init(void)
    570    1.5       mrg {
    571   1.12   thorpej 	static int uao_initialized;
    572   1.12   thorpej 
    573   1.12   thorpej 	if (uao_initialized)
    574   1.12   thorpej 		return;
    575   1.87   thorpej 	uao_initialized = true;
    576    1.5       mrg 	LIST_INIT(&uao_list);
    577   1.96        ad 	mutex_init(&uao_list_lock, MUTEX_DEFAULT, IPL_NONE);
    578  1.103        ad 	pool_cache_bootstrap(&uvm_aobj_cache, sizeof(struct uvm_aobj), 0, 0,
    579  1.103        ad 	    0, "aobj", NULL, IPL_NONE, NULL, NULL, NULL);
    580    1.1       mrg }
    581    1.1       mrg 
    582    1.1       mrg /*
    583    1.1       mrg  * uao_reference: add a ref to an aobj
    584    1.1       mrg  *
    585   1.27       chs  * => aobj must be unlocked
    586   1.27       chs  * => just lock it and call the locked version
    587    1.1       mrg  */
    588   1.46       chs 
    589    1.5       mrg void
    590   1.67   thorpej uao_reference(struct uvm_object *uobj)
    591    1.1       mrg {
    592  1.101        ad 
    593  1.101        ad 	/*
    594  1.101        ad  	 * kernel_object already has plenty of references, leave it alone.
    595  1.101        ad  	 */
    596  1.101        ad 
    597  1.101        ad 	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
    598  1.101        ad 		return;
    599  1.101        ad 
    600   1.96        ad 	mutex_enter(&uobj->vmobjlock);
    601   1.27       chs 	uao_reference_locked(uobj);
    602   1.96        ad 	mutex_exit(&uobj->vmobjlock);
    603   1.27       chs }
    604   1.27       chs 
    605   1.27       chs /*
    606   1.27       chs  * uao_reference_locked: add a ref to an aobj that is already locked
    607   1.27       chs  *
    608   1.27       chs  * => aobj must be locked
    609   1.27       chs  * this needs to be separate from the normal routine
    610   1.27       chs  * since sometimes we need to add a reference to an aobj when
    611   1.27       chs  * it's already locked.
    612   1.27       chs  */
    613   1.46       chs 
    614   1.27       chs void
    615   1.67   thorpej uao_reference_locked(struct uvm_object *uobj)
    616   1.27       chs {
    617    1.5       mrg 	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);
    618    1.1       mrg 
    619    1.5       mrg 	/*
    620    1.5       mrg  	 * kernel_object already has plenty of references, leave it alone.
    621    1.5       mrg  	 */
    622    1.1       mrg 
    623   1.20   thorpej 	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
    624    1.5       mrg 		return;
    625    1.1       mrg 
    626   1.46       chs 	uobj->uo_refs++;
    627   1.41       chs 	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
    628   1.27       chs 		    uobj, uobj->uo_refs,0,0);
    629    1.1       mrg }
    630    1.1       mrg 
    631    1.1       mrg /*
    632    1.1       mrg  * uao_detach: drop a reference to an aobj
    633    1.1       mrg  *
    634   1.27       chs  * => aobj must be unlocked
    635   1.27       chs  * => just lock it and call the locked version
    636    1.1       mrg  */
    637   1.46       chs 
    638    1.5       mrg void
    639   1.67   thorpej uao_detach(struct uvm_object *uobj)
    640    1.5       mrg {
    641  1.101        ad 
    642  1.101        ad 	/*
    643  1.101        ad  	 * detaching from kernel_object is a noop.
    644  1.101        ad  	 */
    645  1.101        ad 
    646  1.101        ad 	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
    647  1.102        ad 		return;
    648  1.101        ad 
    649   1.96        ad 	mutex_enter(&uobj->vmobjlock);
    650   1.27       chs 	uao_detach_locked(uobj);
    651   1.27       chs }
    652   1.27       chs 
    653   1.27       chs /*
    654   1.27       chs  * uao_detach_locked: drop a reference to an aobj
    655   1.27       chs  *
    656   1.27       chs  * => aobj must be locked, and is unlocked (or freed) upon return.
    657   1.27       chs  * this needs to be separate from the normal routine
    658   1.27       chs  * since sometimes we need to detach from an aobj when
    659   1.27       chs  * it's already locked.
    660   1.27       chs  */
    661   1.46       chs 
    662   1.27       chs void
    663   1.67   thorpej uao_detach_locked(struct uvm_object *uobj)
    664   1.27       chs {
    665    1.5       mrg 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    666   1.46       chs 	struct vm_page *pg;
    667    1.5       mrg 	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);
    668    1.1       mrg 
    669    1.5       mrg 	/*
    670    1.5       mrg  	 * detaching from kernel_object is a noop.
    671    1.5       mrg  	 */
    672   1.46       chs 
    673   1.27       chs 	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
    674   1.96        ad 		mutex_exit(&uobj->vmobjlock);
    675    1.5       mrg 		return;
    676   1.27       chs 	}
    677    1.5       mrg 
    678    1.5       mrg 	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
    679   1.46       chs 	uobj->uo_refs--;
    680   1.46       chs 	if (uobj->uo_refs) {
    681   1.96        ad 		mutex_exit(&uobj->vmobjlock);
    682    1.5       mrg 		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
    683    1.5       mrg 		return;
    684    1.5       mrg 	}
    685    1.5       mrg 
    686    1.5       mrg 	/*
    687    1.5       mrg  	 * remove the aobj from the global list.
    688    1.5       mrg  	 */
    689   1.46       chs 
    690   1.92        ad 	mutex_enter(&uao_list_lock);
    691    1.5       mrg 	LIST_REMOVE(aobj, u_list);
    692   1.92        ad 	mutex_exit(&uao_list_lock);
    693    1.5       mrg 
    694    1.5       mrg 	/*
    695   1.46       chs  	 * free all the pages left in the aobj.  for each page,
    696   1.46       chs 	 * when the page is no longer busy (and thus after any disk i/o that
    697   1.46       chs 	 * it's involved in is complete), release any swap resources and
    698   1.46       chs 	 * free the page itself.
    699    1.5       mrg  	 */
    700   1.46       chs 
    701   1.96        ad 	mutex_enter(&uvm_pageqlock);
    702   1.46       chs 	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
    703   1.46       chs 		pmap_page_protect(pg, VM_PROT_NONE);
    704    1.5       mrg 		if (pg->flags & PG_BUSY) {
    705   1.46       chs 			pg->flags |= PG_WANTED;
    706   1.96        ad 			mutex_exit(&uvm_pageqlock);
    707   1.87   thorpej 			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, false,
    708   1.46       chs 			    "uao_det", 0);
    709   1.96        ad 			mutex_enter(&uobj->vmobjlock);
    710   1.96        ad 			mutex_enter(&uvm_pageqlock);
    711    1.5       mrg 			continue;
    712    1.5       mrg 		}
    713   1.18       chs 		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
    714    1.5       mrg 		uvm_pagefree(pg);
    715    1.5       mrg 	}
    716   1.96        ad 	mutex_exit(&uvm_pageqlock);
    717    1.1       mrg 
    718    1.5       mrg 	/*
    719   1.46       chs  	 * finally, free the aobj itself.
    720    1.5       mrg  	 */
    721    1.1       mrg 
    722    1.5       mrg 	uao_free(aobj);
    723    1.5       mrg }
    724    1.1       mrg 
    725    1.1       mrg /*
    726   1.46       chs  * uao_put: flush pages out of a uvm object
    727   1.22   thorpej  *
    728   1.22   thorpej  * => object should be locked by caller.  we may _unlock_ the object
    729   1.22   thorpej  *	if (and only if) we need to clean a page (PGO_CLEANIT).
    730   1.22   thorpej  *	XXXJRT Currently, however, we don't.  In the case of cleaning
    731   1.22   thorpej  *	XXXJRT a page, we simply just deactivate it.  Should probably
    732   1.22   thorpej  *	XXXJRT handle this better, in the future (although "flushing"
    733   1.22   thorpej  *	XXXJRT anonymous memory isn't terribly important).
    734   1.22   thorpej  * => if PGO_CLEANIT is not set, then we will neither unlock the object
     735   1.22   thorpej  *	nor block.
     736   1.22   thorpej  * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
    737   1.22   thorpej  *	for flushing.
    738   1.22   thorpej  * => NOTE: we rely on the fact that the object's memq is a TAILQ and
    739   1.22   thorpej  *	that new pages are inserted on the tail end of the list.  thus,
    740   1.22   thorpej  *	we can make a complete pass through the object in one go by starting
    741   1.22   thorpej  *	at the head and working towards the tail (new pages are put in
    742   1.22   thorpej  *	front of us).
    743   1.22   thorpej  * => NOTE: we are allowed to lock the page queues, so the caller
    744   1.22   thorpej  *	must not be holding the lock on them [e.g. pagedaemon had
    745   1.22   thorpej  *	better not call us with the queues locked]
    746   1.86      matt  * => we return 0 unless we encountered some sort of I/O error
    747   1.22   thorpej  *	XXXJRT currently never happens, as we never directly initiate
    748   1.22   thorpej  *	XXXJRT I/O
    749   1.22   thorpej  *
    750   1.22   thorpej  * note on page traversal:
    751   1.22   thorpej  *	we can traverse the pages in an object either by going down the
    752   1.22   thorpej  *	linked list in "uobj->memq", or we can go over the address range
    753   1.22   thorpej  *	by page doing hash table lookups for each address.  depending
    754   1.22   thorpej  *	on how many pages are in the object it may be cheaper to do one
    755   1.22   thorpej  *	or the other.  we set "by_list" to true if we are using memq.
    756   1.22   thorpej  *	if the cost of a hash lookup was equal to the cost of the list
    757   1.22   thorpej  *	traversal we could compare the number of pages in the start->stop
    758   1.22   thorpej  *	range to the total number of pages in the object.  however, it
    759   1.22   thorpej  *	seems that a hash table lookup is more expensive than the linked
    760   1.22   thorpej  *	list traversal, so we multiply the number of pages in the
    761   1.22   thorpej  *	start->stop range by a penalty which we define below.
    762    1.1       mrg  */
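/*
 * concretely (an illustration, not from the original file, assuming a
 * penalty of 4): flushing a 10-page range from an object holding 100
 * resident pages costs 10 * 4 = 40 lookup "units" versus 100 units of
 * list walking, so by_list is false and we do per-page lookups;
 * flushing the whole object tips the balance back to the list walk.
 */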
    763   1.22   thorpej 
    764   1.68   thorpej static int
    765   1.67   thorpej uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
    766    1.5       mrg {
    767   1.46       chs 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    768   1.51     enami 	struct vm_page *pg, *nextpg, curmp, endmp;
    769   1.85   thorpej 	bool by_list;
    770   1.28    kleink 	voff_t curoff;
    771   1.46       chs 	UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);
    772   1.22   thorpej 
    773   1.96        ad 	KASSERT(mutex_owned(&uobj->vmobjlock));
    774   1.96        ad 
    775   1.46       chs 	curoff = 0;
    776   1.22   thorpej 	if (flags & PGO_ALLPAGES) {
    777   1.22   thorpej 		start = 0;
    778   1.22   thorpej 		stop = aobj->u_pages << PAGE_SHIFT;
    779   1.86      matt 		by_list = true;		/* always go by the list */
    780   1.22   thorpej 	} else {
    781   1.22   thorpej 		start = trunc_page(start);
    782   1.71      yamt 		if (stop == 0) {
    783   1.71      yamt 			stop = aobj->u_pages << PAGE_SHIFT;
    784   1.71      yamt 		} else {
    785   1.71      yamt 			stop = round_page(stop);
    786   1.71      yamt 		}
    787   1.22   thorpej 		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
     788   1.22   thorpej 			printf("uao_put: strange, got an out of range "
    789   1.22   thorpej 			    "flush (fixed)\n");
    790   1.22   thorpej 			stop = aobj->u_pages << PAGE_SHIFT;
    791   1.22   thorpej 		}
    792   1.22   thorpej 		by_list = (uobj->uo_npages <=
    793   1.46       chs 		    ((stop - start) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
    794   1.22   thorpej 	}
    795   1.22   thorpej 	UVMHIST_LOG(maphist,
    796   1.22   thorpej 	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
    797   1.22   thorpej 	    start, stop, by_list, flags);
    798    1.1       mrg 
    799    1.5       mrg 	/*
    800   1.22   thorpej 	 * Don't need to do any work here if we're not freeing
    801   1.22   thorpej 	 * or deactivating pages.
    802   1.22   thorpej 	 */
    803   1.46       chs 
    804   1.22   thorpej 	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
    805   1.96        ad 		mutex_exit(&uobj->vmobjlock);
    806   1.46       chs 		return 0;
    807   1.22   thorpej 	}
    808   1.22   thorpej 
    809    1.5       mrg 	/*
    810   1.51     enami 	 * Initialize the marker pages.  See the comment in
    811   1.51     enami 	 * genfs_putpages() also.
    812   1.51     enami 	 */
    813   1.51     enami 
    814   1.51     enami 	curmp.uobject = uobj;
    815   1.51     enami 	curmp.offset = (voff_t)-1;
    816   1.51     enami 	curmp.flags = PG_BUSY;
    817   1.51     enami 	endmp.uobject = uobj;
    818   1.51     enami 	endmp.offset = (voff_t)-1;
    819   1.51     enami 	endmp.flags = PG_BUSY;
    820   1.51     enami 
    821   1.51     enami 	/*
    822   1.46       chs 	 * now do it.  note: we must update nextpg in the body of loop or we
    823   1.51     enami 	 * will get stuck.  we need to use nextpg if we'll traverse the list
    824   1.51     enami 	 * because we may free "pg" before doing the next loop.
    825   1.21   thorpej 	 */
    826   1.22   thorpej 
    827   1.22   thorpej 	if (by_list) {
    828  1.102        ad 		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
    829   1.51     enami 		nextpg = TAILQ_FIRST(&uobj->memq);
    830   1.89        ad 		uvm_lwp_hold(curlwp);
    831   1.22   thorpej 	} else {
    832   1.22   thorpej 		curoff = start;
    833   1.52       scw 		nextpg = NULL;	/* Quell compiler warning */
    834   1.22   thorpej 	}
    835   1.22   thorpej 
    836   1.99        ad 	/* locked: uobj */
    837   1.51     enami 	for (;;) {
    838   1.22   thorpej 		if (by_list) {
    839   1.51     enami 			pg = nextpg;
    840   1.51     enami 			if (pg == &endmp)
    841   1.51     enami 				break;
    842  1.102        ad 			nextpg = TAILQ_NEXT(pg, listq.queue);
    843   1.46       chs 			if (pg->offset < start || pg->offset >= stop)
    844   1.22   thorpej 				continue;
    845   1.22   thorpej 		} else {
    846   1.51     enami 			if (curoff < stop) {
    847   1.51     enami 				pg = uvm_pagelookup(uobj, curoff);
    848   1.51     enami 				curoff += PAGE_SIZE;
    849   1.51     enami 			} else
    850   1.51     enami 				break;
    851   1.46       chs 			if (pg == NULL)
    852   1.22   thorpej 				continue;
    853   1.22   thorpej 		}
    854   1.98      yamt 
    855   1.98      yamt 		/*
    856   1.98      yamt 		 * wait and try again if the page is busy.
    857   1.98      yamt 		 */
    858   1.98      yamt 
    859   1.98      yamt 		if (pg->flags & PG_BUSY) {
    860   1.98      yamt 			if (by_list) {
    861  1.102        ad 				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
    862   1.98      yamt 			}
    863   1.98      yamt 			pg->flags |= PG_WANTED;
    864   1.98      yamt 			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
    865   1.98      yamt 			    "uao_put", 0);
    866   1.98      yamt 			mutex_enter(&uobj->vmobjlock);
    867   1.98      yamt 			if (by_list) {
    868  1.102        ad 				nextpg = TAILQ_NEXT(&curmp, listq.queue);
    869   1.98      yamt 				TAILQ_REMOVE(&uobj->memq, &curmp,
    870  1.102        ad 				    listq.queue);
    871   1.98      yamt 			} else
    872   1.98      yamt 				curoff -= PAGE_SIZE;
    873   1.98      yamt 			continue;
    874   1.98      yamt 		}
    875   1.98      yamt 
    876   1.46       chs 		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
    877   1.41       chs 
    878   1.22   thorpej 		/*
    879   1.22   thorpej 		 * XXX In these first 3 cases, we always just
    880   1.22   thorpej 		 * XXX deactivate the page.  We may want to
    881   1.22   thorpej 		 * XXX handle the different cases more specifically
    882   1.22   thorpej 		 * XXX in the future.
    883   1.22   thorpej 		 */
    884   1.46       chs 
    885   1.22   thorpej 		case PGO_CLEANIT|PGO_FREE:
    886   1.22   thorpej 		case PGO_CLEANIT|PGO_DEACTIVATE:
    887   1.22   thorpej 		case PGO_DEACTIVATE:
    888   1.25   thorpej  deactivate_it:
    889   1.98      yamt 			mutex_enter(&uvm_pageqlock);
    890   1.83      yamt 			/* skip the page if it's wired */
    891   1.98      yamt 			if (pg->wire_count == 0) {
    892   1.98      yamt 				uvm_pagedeactivate(pg);
    893   1.98      yamt 			}
    894   1.98      yamt 			mutex_exit(&uvm_pageqlock);
    895   1.98      yamt 			break;
    896   1.22   thorpej 
    897   1.22   thorpej 		case PGO_FREE:
    898   1.25   thorpej 			/*
    899   1.25   thorpej 			 * If there are multiple references to
    900   1.25   thorpej 			 * the object, just deactivate the page.
    901   1.25   thorpej 			 */
    902   1.46       chs 
    903   1.25   thorpej 			if (uobj->uo_refs > 1)
    904   1.25   thorpej 				goto deactivate_it;
    905   1.25   thorpej 
    906   1.22   thorpej 			/*
    907   1.98      yamt 			 * free the swap slot and the page.
    908   1.22   thorpej 			 */
    909   1.46       chs 
    910   1.46       chs 			pmap_page_protect(pg, VM_PROT_NONE);
    911   1.75      yamt 
    912   1.75      yamt 			/*
    913   1.75      yamt 			 * freeing swapslot here is not strictly necessary.
    914   1.75      yamt 			 * however, leaving it here doesn't save much
    915   1.75      yamt 			 * because we need to update swap accounting anyway.
    916   1.75      yamt 			 */
    917   1.75      yamt 
    918   1.46       chs 			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
    919   1.98      yamt 			mutex_enter(&uvm_pageqlock);
    920   1.46       chs 			uvm_pagefree(pg);
    921   1.98      yamt 			mutex_exit(&uvm_pageqlock);
    922   1.98      yamt 			break;
    923   1.98      yamt 
    924   1.98      yamt 		default:
    925   1.98      yamt 			panic("%s: impossible", __func__);
    926   1.22   thorpej 		}
    927   1.22   thorpej 	}
    928   1.51     enami 	if (by_list) {
    929  1.102        ad 		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
    930   1.89        ad 		uvm_lwp_rele(curlwp);
    931   1.89        ad 	}
    932   1.96        ad 	mutex_exit(&uobj->vmobjlock);
    933   1.46       chs 	return 0;
    934    1.1       mrg }
    935    1.1       mrg 
    936    1.1       mrg /*
    937    1.1       mrg  * uao_get: fetch me a page
    938    1.1       mrg  *
    939    1.1       mrg  * we have three cases:
    940    1.1       mrg  * 1: page is resident     -> just return the page.
    941    1.1       mrg  * 2: page is zero-fill    -> allocate a new page and zero it.
    942    1.1       mrg  * 3: page is swapped out  -> fetch the page from swap.
    943    1.1       mrg  *
    944    1.1       mrg  * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
    945    1.1       mrg  * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
    946   1.40       chs  * then we will need to return EBUSY.
    947    1.1       mrg  *
    948    1.1       mrg  * => prefer map unlocked (not required)
    949    1.1       mrg  * => object must be locked!  we will _unlock_ it before starting any I/O.
    950    1.1       mrg  * => flags: PGO_ALLPAGES: get all of the pages
    951    1.1       mrg  *           PGO_LOCKED: fault data structures are locked
    952    1.1       mrg  * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
    953    1.1       mrg  * => NOTE: caller must check for released pages!!
    954    1.1       mrg  */
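/*
 * caller's-eye sketch (illustrative and simplified, not part of the
 * original file; uobj and offset are assumed to be in scope): fault
 * handling typically tries PGO_LOCKED first and falls back to a
 * blocking call when the page needs I/O.
 */
#if 0	/* example only */
	struct vm_page *pg = NULL;
	int npages = 1;
	int error;

	mutex_enter(&uobj->vmobjlock);
	error = (*uobj->pgops->pgo_get)(uobj, offset, &pg, &npages, 0,
	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_LOCKED);
	if (error == EBUSY) {
		/* case 3: the page is in swap; this call may sleep */
		npages = 1;
		error = (*uobj->pgops->pgo_get)(uobj, offset, &pg, &npages,
		    0, VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
		/* on return the object has been unlocked */
	}
#endif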
    955   1.46       chs 
    956    1.5       mrg static int
    957   1.67   thorpej uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    958   1.82      yamt     int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
    959    1.5       mrg {
    960   1.72      yamt #if defined(VMSWAP)
    961    1.5       mrg 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    962   1.72      yamt #endif /* defined(VMSWAP) */
    963   1.28    kleink 	voff_t current_offset;
    964   1.52       scw 	struct vm_page *ptmp = NULL;	/* Quell compiler warning */
    965   1.72      yamt 	int lcv, gotpages, maxpages, swslot, pageidx;
    966   1.85   thorpej 	bool done;
    967    1.5       mrg 	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);
    968    1.5       mrg 
    969   1.27       chs 	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
    970   1.74      yamt 		    (struct uvm_aobj *)uobj, offset, flags,0);
    971   1.37       chs 
    972    1.5       mrg 	/*
    973    1.5       mrg  	 * get number of pages
    974    1.5       mrg  	 */
    975   1.46       chs 
    976    1.5       mrg 	maxpages = *npagesp;
    977    1.5       mrg 
    978    1.5       mrg 	/*
      979    1.5       mrg  	 * step 1: handle the case where the fault data structures are locked.
    980    1.5       mrg  	 */
    981    1.1       mrg 
    982    1.5       mrg 	if (flags & PGO_LOCKED) {
    983   1.46       chs 
    984    1.5       mrg 		/*
    985    1.5       mrg  		 * step 1a: get pages that are already resident.   only do
    986    1.5       mrg 		 * this if the data structures are locked (i.e. the first
    987    1.5       mrg 		 * time through).
    988    1.5       mrg  		 */
    989    1.5       mrg 
    990   1.87   thorpej 		done = true;	/* be optimistic */
    991    1.5       mrg 		gotpages = 0;	/* # of pages we got so far */
    992    1.5       mrg 		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
    993    1.5       mrg 		    lcv++, current_offset += PAGE_SIZE) {
    994    1.5       mrg 			/* do we care about this page?  if not, skip it */
    995    1.5       mrg 			if (pps[lcv] == PGO_DONTCARE)
    996    1.5       mrg 				continue;
    997    1.5       mrg 			ptmp = uvm_pagelookup(uobj, current_offset);
    998    1.5       mrg 
    999    1.5       mrg 			/*
   1000   1.30   thorpej  			 * if page is new, attempt to allocate the page,
   1001   1.30   thorpej 			 * zero-fill'd.
   1002    1.5       mrg  			 */
   1003   1.46       chs 
   1004   1.46       chs 			if (ptmp == NULL && uao_find_swslot(&aobj->u_obj,
   1005   1.15       chs 			    current_offset >> PAGE_SHIFT) == 0) {
   1006    1.5       mrg 				ptmp = uvm_pagealloc(uobj, current_offset,
   1007   1.30   thorpej 				    NULL, UVM_PGA_ZERO);
   1008    1.5       mrg 				if (ptmp) {
   1009    1.5       mrg 					/* new page */
   1010   1.47       chs 					ptmp->flags &= ~(PG_FAKE);
   1011    1.5       mrg 					ptmp->pqflags |= PQ_AOBJ;
   1012   1.47       chs 					goto gotpage;
   1013    1.5       mrg 				}
   1014    1.5       mrg 			}
   1015    1.5       mrg 
   1016    1.5       mrg 			/*
    1017   1.46       chs 			 * to be useful we must get a non-busy page
   1018    1.5       mrg 			 */
   1019   1.46       chs 
   1020   1.46       chs 			if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
   1021    1.5       mrg 				if (lcv == centeridx ||
   1022    1.5       mrg 				    (flags & PGO_ALLPAGES) != 0)
   1023    1.5       mrg 					/* need to do a wait or I/O! */
   1024   1.87   thorpej 					done = false;
    1025    1.5       mrg 				continue;
   1026    1.5       mrg 			}
   1027    1.5       mrg 
   1028    1.5       mrg 			/*
   1029    1.5       mrg 			 * useful page: busy/lock it and plug it in our
   1030    1.5       mrg 			 * result array
   1031    1.5       mrg 			 */
   1032   1.46       chs 
   1033    1.5       mrg 			/* caller must un-busy this page */
   1034   1.41       chs 			ptmp->flags |= PG_BUSY;
   1035    1.5       mrg 			UVM_PAGE_OWN(ptmp, "uao_get1");
   1036   1.47       chs gotpage:
   1037    1.5       mrg 			pps[lcv] = ptmp;
   1038    1.5       mrg 			gotpages++;
   1039   1.46       chs 		}
   1040    1.5       mrg 
   1041    1.5       mrg 		/*
    1042    1.5       mrg  		 * step 1b: now we've either done everything needed or we need
   1043    1.5       mrg 		 * to unlock and do some waiting or I/O.
   1044    1.5       mrg  		 */
   1045    1.5       mrg 
   1046    1.5       mrg 		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);
   1047    1.5       mrg 		*npagesp = gotpages;
   1048    1.5       mrg 		if (done)
   1049   1.46       chs 			return 0;
   1050    1.5       mrg 		else
   1051   1.46       chs 			return EBUSY;
   1052    1.1       mrg 	}
   1053    1.1       mrg 
   1054    1.5       mrg 	/*
   1055    1.5       mrg  	 * step 2: get non-resident or busy pages.
   1056    1.5       mrg  	 * object is locked.   data structures are unlocked.
   1057    1.5       mrg  	 */
   1058    1.5       mrg 
   1059   1.76      yamt 	if ((flags & PGO_SYNCIO) == 0) {
   1060   1.76      yamt 		goto done;
   1061   1.76      yamt 	}
   1062   1.76      yamt 
   1063    1.5       mrg 	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
   1064    1.5       mrg 	    lcv++, current_offset += PAGE_SIZE) {
   1065   1.27       chs 
   1066    1.5       mrg 		/*
   1067    1.5       mrg 		 * - skip over pages we've already gotten or don't want
   1068    1.5       mrg 		 * - skip over pages we don't _have_ to get
   1069    1.5       mrg 		 */
   1070   1.27       chs 
   1071    1.5       mrg 		if (pps[lcv] != NULL ||
   1072    1.5       mrg 		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
   1073    1.5       mrg 			continue;
   1074    1.5       mrg 
   1075   1.27       chs 		pageidx = current_offset >> PAGE_SHIFT;
   1076   1.27       chs 
   1077    1.5       mrg 		/*
   1078    1.5       mrg  		 * we have yet to locate the current page (pps[lcv]).   we
   1079    1.5       mrg 		 * first look for a page that is already at the current offset.
   1080    1.5       mrg 		 * if we find a page, we check to see if it is busy or
   1081    1.5       mrg 		 * released.  if that is the case, then we sleep on the page
   1082    1.5       mrg 		 * until it is no longer busy or released and repeat the lookup.
   1083    1.5       mrg 		 * if the page we found is neither busy nor released, then we
   1084    1.5       mrg 		 * busy it (so we own it) and plug it into pps[lcv].   this
   1085    1.5       mrg 		 * 'break's the following while loop and indicates we are
   1086    1.5       mrg 		 * ready to move on to the next page in the "lcv" loop above.
   1087    1.5       mrg  		 *
   1088    1.5       mrg  		 * if we exit the while loop with pps[lcv] still set to NULL,
   1089    1.5       mrg 		 * then it means that we allocated a new busy/fake/clean page
   1090    1.5       mrg 		 * ptmp in the object and we need to do I/O to fill in the data.
   1091    1.5       mrg  		 */
   1092    1.5       mrg 
   1093    1.5       mrg 		/* top of "pps" while loop */
   1094    1.5       mrg 		while (pps[lcv] == NULL) {
   1095    1.5       mrg 			/* look for a resident page */
   1096    1.5       mrg 			ptmp = uvm_pagelookup(uobj, current_offset);
   1097    1.5       mrg 
   1098    1.5       mrg 			/* not resident?   allocate one now (if we can) */
   1099    1.5       mrg 			if (ptmp == NULL) {
   1100    1.5       mrg 
   1101    1.5       mrg 				ptmp = uvm_pagealloc(uobj, current_offset,
   1102   1.19       chs 				    NULL, 0);
   1103    1.5       mrg 
   1104    1.5       mrg 				/* out of RAM? */
   1105    1.5       mrg 				if (ptmp == NULL) {
   1106   1.96        ad 					mutex_exit(&uobj->vmobjlock);
   1107    1.5       mrg 					UVMHIST_LOG(pdhist,
   1108    1.5       mrg 					    "sleeping, ptmp == NULL\n",0,0,0,0);
   1109    1.5       mrg 					uvm_wait("uao_getpage");
   1110   1.96        ad 					mutex_enter(&uobj->vmobjlock);
   1111   1.41       chs 					continue;
   1112    1.5       mrg 				}
   1113    1.5       mrg 
   1114    1.5       mrg 				/*
   1115    1.5       mrg 				 * safe with PQ's unlocked: because we just
   1116    1.5       mrg 				 * alloc'd the page
   1117    1.5       mrg 				 */
   1118   1.46       chs 
   1119    1.5       mrg 				ptmp->pqflags |= PQ_AOBJ;
   1120    1.5       mrg 
   1121   1.41       chs 				/*
   1122    1.5       mrg 				 * got new page ready for I/O.  break pps while
   1123    1.5       mrg 				 * loop.  pps[lcv] is still NULL.
   1124    1.5       mrg 				 */
   1125   1.46       chs 
   1126    1.5       mrg 				break;
   1127    1.5       mrg 			}
   1128    1.5       mrg 
   1129    1.5       mrg 			/* page is there, see if we need to wait on it */
   1130   1.46       chs 			if ((ptmp->flags & PG_BUSY) != 0) {
   1131    1.5       mrg 				ptmp->flags |= PG_WANTED;
   1132    1.5       mrg 				UVMHIST_LOG(pdhist,
   1133    1.5       mrg 				    "sleeping, ptmp->flags 0x%x\n",
   1134    1.5       mrg 				    ptmp->flags,0,0,0);
   1135   1.23   thorpej 				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
   1136   1.87   thorpej 				    false, "uao_get", 0);
   1137   1.96        ad 				mutex_enter(&uobj->vmobjlock);
   1138   1.46       chs 				continue;
   1139    1.5       mrg 			}
   1140   1.41       chs 
   1141   1.41       chs 			/*
    1142    1.5       mrg  			 * if we get here then the page is resident and no
    1143    1.5       mrg 			 * longer busy.  we busy it now (so we
   1144    1.5       mrg 			 * own it) and set pps[lcv] (so that we exit the while
   1145    1.5       mrg 			 * loop).
   1146    1.5       mrg  			 */
   1147   1.46       chs 
   1148    1.5       mrg 			/* we own it, caller must un-busy */
   1149    1.5       mrg 			ptmp->flags |= PG_BUSY;
   1150    1.5       mrg 			UVM_PAGE_OWN(ptmp, "uao_get2");
   1151    1.5       mrg 			pps[lcv] = ptmp;
   1152    1.5       mrg 		}
   1153    1.5       mrg 
   1154    1.5       mrg 		/*
   1155    1.5       mrg  		 * if we own the valid page at the correct offset, pps[lcv] will
   1156    1.5       mrg  		 * point to it.   nothing more to do except go to the next page.
   1157    1.5       mrg  		 */
   1158   1.46       chs 
   1159    1.5       mrg 		if (pps[lcv])
   1160    1.5       mrg 			continue;			/* next lcv */
   1161    1.5       mrg 
   1162    1.5       mrg 		/*
   1163   1.41       chs  		 * we have a "fake/busy/clean" page that we just allocated.
   1164    1.5       mrg  		 * do the needed "i/o", either reading from swap or zeroing.
   1165    1.5       mrg  		 */
   1166   1.46       chs 
   1167   1.46       chs 		swslot = uao_find_swslot(&aobj->u_obj, pageidx);
   1168    1.5       mrg 
   1169    1.5       mrg 		/*
   1170    1.5       mrg  		 * just zero the page if there's nothing in swap.
   1171    1.5       mrg  		 */
   1172   1.46       chs 
   1173   1.46       chs 		if (swslot == 0) {
   1174   1.46       chs 
   1175    1.5       mrg 			/*
   1176    1.5       mrg 			 * page hasn't existed before, just zero it.
   1177    1.5       mrg 			 */
   1178   1.46       chs 
   1179    1.5       mrg 			uvm_pagezero(ptmp);
   1180   1.27       chs 		} else {
   1181   1.72      yamt #if defined(VMSWAP)
   1182   1.72      yamt 			int error;
   1183   1.72      yamt 
   1184    1.5       mrg 			UVMHIST_LOG(pdhist, "pagein from swslot %d",
   1185    1.5       mrg 			     swslot, 0,0,0);
   1186    1.5       mrg 
   1187    1.5       mrg 			/*
   1188    1.5       mrg 			 * page in the swapped-out page.
   1189    1.5       mrg 			 * unlock object for i/o, relock when done.
   1190    1.5       mrg 			 */
   1191   1.46       chs 
   1192   1.96        ad 			mutex_exit(&uobj->vmobjlock);
   1193   1.46       chs 			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
   1194   1.96        ad 			mutex_enter(&uobj->vmobjlock);
   1195    1.5       mrg 
   1196    1.5       mrg 			/*
   1197    1.5       mrg 			 * I/O done.  check for errors.
   1198    1.5       mrg 			 */
   1199   1.46       chs 
   1200   1.46       chs 			if (error != 0) {
   1201    1.5       mrg 				UVMHIST_LOG(pdhist, "<- done (error=%d)",
   1202   1.46       chs 				    error,0,0,0);
   1203    1.5       mrg 				if (ptmp->flags & PG_WANTED)
   1204   1.24   thorpej 					wakeup(ptmp);
   1205   1.27       chs 
   1206   1.27       chs 				/*
   1207   1.27       chs 				 * remove the swap slot from the aobj
   1208   1.27       chs 				 * and mark the aobj as having no real slot.
   1209   1.27       chs 				 * don't free the swap slot, thus preventing
   1210   1.27       chs 				 * it from being used again.
   1211   1.27       chs 				 */
   1212   1.46       chs 
   1213   1.27       chs 				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
   1214   1.27       chs 							SWSLOT_BAD);
   1215   1.57        pk 				if (swslot > 0) {
   1216   1.45       chs 					uvm_swap_markbad(swslot, 1);
   1217   1.45       chs 				}
   1218   1.27       chs 
   1219   1.96        ad 				mutex_enter(&uvm_pageqlock);
   1220    1.5       mrg 				uvm_pagefree(ptmp);
   1221   1.96        ad 				mutex_exit(&uvm_pageqlock);
   1222   1.96        ad 				mutex_exit(&uobj->vmobjlock);
   1223   1.46       chs 				return error;
   1224    1.5       mrg 			}
   1225   1.72      yamt #else /* defined(VMSWAP) */
   1226   1.72      yamt 			panic("%s: pagein", __func__);
   1227   1.72      yamt #endif /* defined(VMSWAP) */
   1228    1.5       mrg 		}
   1229    1.5       mrg 
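                          		/*
                          		 * on a read fault the page contents still match the
                          		 * backing store (the swap block just read, or zeroes
                          		 * for a zero-fill page), so the page can be marked
                          		 * clean; a later write will redirty it via the pmap
                          		 * modified bit.
                          		 */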
   1230   1.78      yamt 		if ((access_type & VM_PROT_WRITE) == 0) {
   1231   1.78      yamt 			ptmp->flags |= PG_CLEAN;
   1232   1.78      yamt 			pmap_clear_modify(ptmp);
   1233   1.78      yamt 		}
   1234   1.78      yamt 
   1235   1.41       chs 		/*
   1236    1.5       mrg  		 * we got the page!   clear the fake flag (indicates valid
   1237    1.5       mrg 		 * data now in page) and plug into our result array.   note
    1238   1.41       chs 		 * that the page is still busy.
    1239    1.5       mrg  		 *
    1240    1.5       mrg  		 * it is the caller's job to:
   1241    1.5       mrg  		 * => check if the page is released
   1242    1.5       mrg  		 * => unbusy the page
   1243    1.5       mrg  		 * => activate the page
   1244    1.5       mrg  		 */
   1245    1.5       mrg 
   1246   1.46       chs 		ptmp->flags &= ~PG_FAKE;
   1247    1.5       mrg 		pps[lcv] = ptmp;
   1248   1.46       chs 	}
   1249    1.1       mrg 
   1250    1.1       mrg 	/*
   1251    1.5       mrg  	 * finally, unlock object and return.
   1252    1.5       mrg  	 */
   1253    1.1       mrg 
   1254   1.76      yamt done:
   1255   1.96        ad 	mutex_exit(&uobj->vmobjlock);
   1256    1.5       mrg 	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
   1257   1.46       chs 	return 0;
   1258    1.1       mrg }
   1259    1.1       mrg 
   1260   1.72      yamt #if defined(VMSWAP)
   1261   1.72      yamt 
   1262    1.1       mrg /*
   1263   1.18       chs  * uao_dropswap:  release any swap resources from this aobj page.
   1264   1.41       chs  *
   1265   1.18       chs  * => aobj must be locked or have a reference count of 0.
   1266   1.18       chs  */
   1267   1.18       chs 
   1268   1.18       chs void
   1269   1.67   thorpej uao_dropswap(struct uvm_object *uobj, int pageidx)
   1270   1.18       chs {
   1271   1.18       chs 	int slot;
   1272   1.18       chs 
   1273   1.18       chs 	slot = uao_set_swslot(uobj, pageidx, 0);
   1274   1.18       chs 	if (slot) {
   1275   1.18       chs 		uvm_swap_free(slot, 1);
   1276   1.18       chs 	}
   1277   1.27       chs }
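
                          /*
                           * typical caller pattern (a sketch, not a prescription): when
                           * freeing a resident aobj page, drop the swap slot shadowing it
                           * first, e.g.
                           *
                           *	uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
                           *	mutex_enter(&uvm_pageqlock);
                           *	uvm_pagefree(pg);
                           *	mutex_exit(&uvm_pageqlock);
                           */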
   1278   1.27       chs 
   1279   1.27       chs /*
    1280   1.27       chs  * page in every page, in every aobj, that is paged out to a swslot in the given range.
   1281   1.41       chs  *
   1282   1.27       chs  * => nothing should be locked.
   1283   1.87   thorpej  * => returns true if pagein was aborted due to lack of memory.
   1284   1.27       chs  */
   1285   1.46       chs 
   1286   1.85   thorpej bool
   1287   1.67   thorpej uao_swap_off(int startslot, int endslot)
   1288   1.27       chs {
   1289   1.27       chs 	struct uvm_aobj *aobj, *nextaobj;
   1290   1.85   thorpej 	bool rv;
   1291   1.27       chs 
   1292   1.27       chs 	/*
   1293   1.27       chs 	 * walk the list of all aobjs.
   1294   1.27       chs 	 */
   1295   1.27       chs 
   1296   1.27       chs restart:
   1297   1.90        ad 	mutex_enter(&uao_list_lock);
   1298   1.27       chs 	for (aobj = LIST_FIRST(&uao_list);
   1299   1.27       chs 	     aobj != NULL;
   1300   1.27       chs 	     aobj = nextaobj) {
   1301   1.27       chs 
   1302   1.27       chs 		/*
   1303   1.46       chs 		 * try to get the object lock, start all over if we fail.
   1304   1.27       chs 		 * most of the time we'll get the aobj lock,
   1305   1.27       chs 		 * so this should be a rare case.
   1306   1.27       chs 		 */
   1307   1.46       chs 
   1308   1.96        ad 		if (!mutex_tryenter(&aobj->u_obj.vmobjlock)) {
   1309   1.90        ad 			mutex_exit(&uao_list_lock);
   1310   1.96        ad 			/* XXX Better than yielding but inadequate. */
   1311   1.96        ad 			kpause("livelock", false, 1, NULL);
   1312   1.27       chs 			goto restart;
   1313   1.27       chs 		}
   1314   1.27       chs 
   1315   1.27       chs 		/*
   1316   1.27       chs 		 * add a ref to the aobj so it doesn't disappear
   1317   1.27       chs 		 * while we're working.
   1318   1.27       chs 		 */
   1319   1.46       chs 
   1320   1.27       chs 		uao_reference_locked(&aobj->u_obj);
   1321   1.27       chs 
   1322   1.27       chs 		/*
   1323   1.27       chs 		 * now it's safe to unlock the uao list.
   1324   1.27       chs 		 */
   1325   1.46       chs 
   1326   1.90        ad 		mutex_exit(&uao_list_lock);
   1327   1.27       chs 
   1328   1.27       chs 		/*
   1329   1.27       chs 		 * page in any pages in the swslot range.
   1330   1.27       chs 		 * if there's an error, abort and return the error.
   1331   1.27       chs 		 */
   1332   1.46       chs 
   1333   1.27       chs 		rv = uao_pagein(aobj, startslot, endslot);
   1334   1.27       chs 		if (rv) {
   1335   1.27       chs 			uao_detach_locked(&aobj->u_obj);
   1336   1.27       chs 			return rv;
   1337   1.27       chs 		}
   1338   1.27       chs 
   1339   1.27       chs 		/*
   1340   1.27       chs 		 * we're done with this aobj.
   1341   1.27       chs 		 * relock the list and drop our ref on the aobj.
   1342   1.27       chs 		 */
   1343   1.46       chs 
   1344   1.90        ad 		mutex_enter(&uao_list_lock);
   1345   1.27       chs 		nextaobj = LIST_NEXT(aobj, u_list);
   1346   1.27       chs 		uao_detach_locked(&aobj->u_obj);
   1347   1.27       chs 	}
   1348   1.27       chs 
   1349   1.27       chs 	/*
   1350   1.27       chs 	 * done with traversal, unlock the list
   1351   1.27       chs 	 */
   1352   1.90        ad 	mutex_exit(&uao_list_lock);
   1353   1.87   thorpej 	return false;
   1354   1.27       chs }
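
                          /*
                           * caller sketch (names hypothetical): the swap_off path would
                           * page in a device's slot range before removing the device, e.g.
                           *
                           *	if (uao_swap_off(sdp_startslot, sdp_endslot))
                           *		fail the swap_off with ENOMEM;
                           */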
   1355   1.27       chs 
   1356   1.27       chs 
   1357   1.27       chs /*
   1358   1.27       chs  * page in any pages from aobj in the given range.
   1359   1.27       chs  *
   1360   1.27       chs  * => aobj must be locked and is returned locked.
   1361   1.87   thorpej  * => returns true if pagein was aborted due to lack of memory.
   1362   1.27       chs  */
   1363   1.85   thorpej static bool
   1364   1.67   thorpej uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
   1365   1.27       chs {
   1366   1.85   thorpej 	bool rv;
   1367   1.27       chs 
   1368   1.27       chs 	if (UAO_USES_SWHASH(aobj)) {
   1369   1.27       chs 		struct uao_swhash_elt *elt;
   1370   1.65  christos 		int buck;
   1371   1.27       chs 
   1372   1.27       chs restart:
   1373   1.65  christos 		for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
   1374   1.65  christos 			for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
   1375   1.27       chs 			     elt != NULL;
   1376   1.27       chs 			     elt = LIST_NEXT(elt, list)) {
   1377   1.27       chs 				int i;
   1378   1.27       chs 
   1379   1.27       chs 				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
   1380   1.27       chs 					int slot = elt->slots[i];
   1381   1.27       chs 
   1382   1.27       chs 					/*
   1383   1.27       chs 					 * if the slot isn't in range, skip it.
   1384   1.27       chs 					 */
   1385   1.46       chs 
   1386   1.41       chs 					if (slot < startslot ||
   1387   1.27       chs 					    slot >= endslot) {
   1388   1.27       chs 						continue;
   1389   1.27       chs 					}
   1390   1.27       chs 
   1391   1.27       chs 					/*
   1392   1.27       chs 					 * process the page,
    1393   1.27       chs 					 * then start over on this object
   1394   1.27       chs 					 * since the swhash elt
   1395   1.27       chs 					 * may have been freed.
   1396   1.27       chs 					 */
   1397   1.46       chs 
   1398   1.27       chs 					rv = uao_pagein_page(aobj,
   1399   1.27       chs 					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
   1400   1.27       chs 					if (rv) {
   1401   1.27       chs 						return rv;
   1402   1.27       chs 					}
   1403   1.27       chs 					goto restart;
   1404   1.27       chs 				}
   1405   1.27       chs 			}
   1406   1.27       chs 		}
   1407   1.27       chs 	} else {
   1408   1.27       chs 		int i;
   1409   1.27       chs 
   1410   1.27       chs 		for (i = 0; i < aobj->u_pages; i++) {
   1411   1.27       chs 			int slot = aobj->u_swslots[i];
   1412   1.27       chs 
   1413   1.27       chs 			/*
   1414   1.27       chs 			 * if the slot isn't in range, skip it
   1415   1.27       chs 			 */
   1416   1.46       chs 
   1417   1.27       chs 			if (slot < startslot || slot >= endslot) {
   1418   1.27       chs 				continue;
   1419   1.27       chs 			}
   1420   1.27       chs 
   1421   1.27       chs 			/*
   1422   1.27       chs 			 * process the page.
   1423   1.27       chs 			 */
   1424   1.46       chs 
   1425   1.27       chs 			rv = uao_pagein_page(aobj, i);
   1426   1.27       chs 			if (rv) {
   1427   1.27       chs 				return rv;
   1428   1.27       chs 			}
   1429   1.27       chs 		}
   1430   1.27       chs 	}
   1431   1.27       chs 
   1432   1.87   thorpej 	return false;
   1433   1.27       chs }
   1434   1.27       chs 
   1435   1.27       chs /*
   1436   1.27       chs  * page in a page from an aobj.  used for swap_off.
   1437   1.87   thorpej  * returns true if pagein was aborted due to lack of memory.
   1438   1.27       chs  *
   1439   1.27       chs  * => aobj must be locked and is returned locked.
   1440   1.27       chs  */
   1441   1.46       chs 
   1442   1.85   thorpej static bool
   1443   1.67   thorpej uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
   1444   1.27       chs {
   1445   1.27       chs 	struct vm_page *pg;
   1446   1.57        pk 	int rv, npages;
   1447   1.27       chs 
   1448   1.27       chs 	pg = NULL;
   1449   1.27       chs 	npages = 1;
   1450   1.27       chs 	/* locked: aobj */
   1451   1.27       chs 	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
   1452   1.77      yamt 	    &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, PGO_SYNCIO);
   1453   1.27       chs 	/* unlocked: aobj */
   1454   1.27       chs 
   1455   1.27       chs 	/*
   1456   1.27       chs 	 * relock and finish up.
   1457   1.27       chs 	 */
   1458   1.46       chs 
   1459   1.96        ad 	mutex_enter(&aobj->u_obj.vmobjlock);
   1460   1.27       chs 	switch (rv) {
   1461   1.40       chs 	case 0:
   1462   1.27       chs 		break;
   1463   1.27       chs 
   1464   1.40       chs 	case EIO:
   1465   1.40       chs 	case ERESTART:
   1466   1.46       chs 
   1467   1.27       chs 		/*
   1468   1.27       chs 		 * nothing more to do on errors.
   1469   1.40       chs 		 * ERESTART can only mean that the anon was freed,
   1470   1.27       chs 		 * so again there's nothing to do.
   1471   1.27       chs 		 */
   1472   1.46       chs 
   1473   1.87   thorpej 		return false;
   1474   1.59        pk 
   1475   1.59        pk 	default:
   1476   1.87   thorpej 		return true;
   1477   1.27       chs 	}
   1478   1.27       chs 
   1479   1.27       chs 	/*
   1480   1.27       chs 	 * ok, we've got the page now.
   1481   1.27       chs 	 * mark it as dirty, clear its swslot and un-busy it.
   1482   1.27       chs 	 */
   1483   1.57        pk 	uao_dropswap(&aobj->u_obj, pageidx);
   1484   1.27       chs 
   1485   1.27       chs 	/*
   1486   1.80      yamt 	 * make sure it's on a page queue.
   1487   1.27       chs 	 */
   1488   1.96        ad 	mutex_enter(&uvm_pageqlock);
   1489   1.58        pk 	if (pg->wire_count == 0)
   1490   1.80      yamt 		uvm_pageenqueue(pg);
   1491   1.96        ad 	mutex_exit(&uvm_pageqlock);
   1492   1.56      yamt 
   1493   1.59        pk 	if (pg->flags & PG_WANTED) {
   1494   1.59        pk 		wakeup(pg);
   1495   1.59        pk 	}
   1496   1.59        pk 	pg->flags &= ~(PG_WANTED|PG_BUSY|PG_CLEAN|PG_FAKE);
   1497   1.56      yamt 	UVM_PAGE_OWN(pg, NULL);
   1498   1.56      yamt 
   1499   1.87   thorpej 	return false;
   1500    1.1       mrg }
   1501   1.72      yamt 
   1502   1.75      yamt /*
   1503   1.75      yamt  * uao_dropswap_range: drop swapslots in the range.
   1504   1.75      yamt  *
   1505   1.75      yamt  * => aobj must be locked and is returned locked.
   1506   1.75      yamt  * => start is inclusive.  end is exclusive.
   1507   1.75      yamt  */
   1508   1.75      yamt 
   1509   1.75      yamt void
   1510   1.75      yamt uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
   1511   1.75      yamt {
   1512   1.75      yamt 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
   1513   1.75      yamt 
   1514   1.96        ad 	KASSERT(mutex_owned(&uobj->vmobjlock));
   1515   1.75      yamt 
   1516   1.75      yamt 	uao_dropswap_range1(aobj, start, end);
   1517   1.75      yamt }
   1518   1.75      yamt 
   1519   1.75      yamt static void
   1520   1.75      yamt uao_dropswap_range1(struct uvm_aobj *aobj, voff_t start, voff_t end)
   1521   1.75      yamt {
   1522   1.75      yamt 	int swpgonlydelta = 0;
   1523   1.75      yamt 
   1524   1.75      yamt 	if (end == 0) {
   1525   1.75      yamt 		end = INT64_MAX;
   1526   1.75      yamt 	}
   1527   1.75      yamt 
   1528   1.75      yamt 	if (UAO_USES_SWHASH(aobj)) {
   1529   1.75      yamt 		int i, hashbuckets = aobj->u_swhashmask + 1;
   1530   1.75      yamt 		voff_t taghi;
   1531   1.75      yamt 		voff_t taglo;
   1532   1.75      yamt 
   1533   1.75      yamt 		taglo = UAO_SWHASH_ELT_TAG(start);
   1534   1.75      yamt 		taghi = UAO_SWHASH_ELT_TAG(end);
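
                          		/*
                          		 * worked example (assuming UAO_SWHASH_CLUSTER_SHIFT
                          		 * is 4, i.e. 16 slots per hash elt): page index 37
                          		 * has tag 37 >> 4 == 2 and in-cluster index
                          		 * 37 & 15 == 5, so only elts whose tag lies in
                          		 * [taglo, taghi] can hold slots for [start, end).
                          		 */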
   1535   1.75      yamt 
   1536   1.75      yamt 		for (i = 0; i < hashbuckets; i++) {
   1537   1.75      yamt 			struct uao_swhash_elt *elt, *next;
   1538   1.75      yamt 
   1539   1.75      yamt 			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
   1540   1.75      yamt 			     elt != NULL;
   1541   1.75      yamt 			     elt = next) {
   1542   1.75      yamt 				int startidx, endidx;
   1543   1.75      yamt 				int j;
   1544   1.75      yamt 
   1545   1.75      yamt 				next = LIST_NEXT(elt, list);
   1546   1.75      yamt 
   1547   1.75      yamt 				if (elt->tag < taglo || taghi < elt->tag) {
   1548   1.75      yamt 					continue;
   1549   1.75      yamt 				}
   1550   1.75      yamt 
   1551   1.75      yamt 				if (elt->tag == taglo) {
   1552   1.75      yamt 					startidx =
   1553   1.75      yamt 					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
   1554   1.75      yamt 				} else {
   1555   1.75      yamt 					startidx = 0;
   1556   1.75      yamt 				}
   1557   1.75      yamt 
   1558   1.75      yamt 				if (elt->tag == taghi) {
   1559   1.75      yamt 					endidx =
   1560   1.75      yamt 					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
   1561   1.75      yamt 				} else {
   1562   1.75      yamt 					endidx = UAO_SWHASH_CLUSTER_SIZE;
   1563   1.75      yamt 				}
   1564   1.75      yamt 
   1565   1.75      yamt 				for (j = startidx; j < endidx; j++) {
   1566   1.75      yamt 					int slot = elt->slots[j];
   1567   1.75      yamt 
   1568   1.75      yamt 					KASSERT(uvm_pagelookup(&aobj->u_obj,
   1569   1.75      yamt 					    (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
   1570   1.75      yamt 					    + j) << PAGE_SHIFT) == NULL);
   1571   1.75      yamt 					if (slot > 0) {
   1572   1.75      yamt 						uvm_swap_free(slot, 1);
   1573   1.75      yamt 						swpgonlydelta++;
   1574   1.75      yamt 						KASSERT(elt->count > 0);
   1575   1.75      yamt 						elt->slots[j] = 0;
   1576   1.75      yamt 						elt->count--;
   1577   1.75      yamt 					}
   1578   1.75      yamt 				}
   1579   1.75      yamt 
   1580   1.75      yamt 				if (elt->count == 0) {
   1581   1.75      yamt 					LIST_REMOVE(elt, list);
   1582   1.75      yamt 					pool_put(&uao_swhash_elt_pool, elt);
   1583   1.75      yamt 				}
   1584   1.75      yamt 			}
   1585   1.75      yamt 		}
   1586   1.75      yamt 	} else {
   1587   1.75      yamt 		int i;
   1588   1.75      yamt 
   1589   1.75      yamt 		if (aobj->u_pages < end) {
   1590   1.75      yamt 			end = aobj->u_pages;
   1591   1.75      yamt 		}
   1592   1.75      yamt 		for (i = start; i < end; i++) {
   1593   1.75      yamt 			int slot = aobj->u_swslots[i];
   1594   1.75      yamt 
   1595   1.75      yamt 			if (slot > 0) {
   1596   1.75      yamt 				uvm_swap_free(slot, 1);
   1597   1.75      yamt 				swpgonlydelta++;
   1598   1.75      yamt 			}
   1599   1.75      yamt 		}
   1600   1.75      yamt 	}
   1601   1.75      yamt 
   1602   1.75      yamt 	/*
    1603   1.75      yamt 	 * adjust the counter of pages that live only in swap
    1604   1.75      yamt 	 * to account for all the swap slots we've freed.
   1605   1.75      yamt 	 */
   1606   1.75      yamt 
   1607   1.75      yamt 	if (swpgonlydelta > 0) {
   1608   1.92        ad 		mutex_enter(&uvm_swap_data_lock);
   1609   1.75      yamt 		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
   1610   1.75      yamt 		uvmexp.swpgonly -= swpgonlydelta;
   1611   1.92        ad 		mutex_exit(&uvm_swap_data_lock);
   1612   1.75      yamt 	}
   1613   1.75      yamt }
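
                          /*
                           * minimal usage sketch: dropping every swap slot of an aobj
                           * (end == 0 is treated above as "to the end of the object"):
                           *
                           *	mutex_enter(&uobj->vmobjlock);
                           *	uao_dropswap_range(uobj, 0, 0);
                           *	mutex_exit(&uobj->vmobjlock);
                           */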
   1614   1.75      yamt 
   1615   1.72      yamt #endif /* defined(VMSWAP) */
   1616