uvm_aobj.c revision 1.68.2.7
      1  1.68.2.7      yamt /*	$NetBSD: uvm_aobj.c,v 1.68.2.7 2008/03/17 09:15:52 yamt Exp $	*/
      2       1.6       mrg 
      3       1.7       chs /*
      4       1.7       chs  * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
      5       1.7       chs  *                    Washington University.
      6       1.7       chs  * All rights reserved.
      7       1.7       chs  *
      8       1.7       chs  * Redistribution and use in source and binary forms, with or without
      9       1.7       chs  * modification, are permitted provided that the following conditions
     10       1.7       chs  * are met:
     11       1.7       chs  * 1. Redistributions of source code must retain the above copyright
     12       1.7       chs  *    notice, this list of conditions and the following disclaimer.
     13       1.7       chs  * 2. Redistributions in binary form must reproduce the above copyright
     14       1.7       chs  *    notice, this list of conditions and the following disclaimer in the
     15       1.7       chs  *    documentation and/or other materials provided with the distribution.
     16       1.7       chs  * 3. All advertising materials mentioning features or use of this software
     17       1.7       chs  *    must display the following acknowledgement:
     18       1.7       chs  *      This product includes software developed by Charles D. Cranor and
     19       1.7       chs  *      Washington University.
     20       1.7       chs  * 4. The name of the author may not be used to endorse or promote products
     21       1.7       chs  *    derived from this software without specific prior written permission.
     22       1.7       chs  *
     23       1.7       chs  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     24       1.7       chs  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     25       1.7       chs  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     26       1.7       chs  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     27       1.7       chs  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     28       1.7       chs  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     29       1.7       chs  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     30       1.7       chs  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     31       1.7       chs  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     32       1.7       chs  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     33       1.7       chs  *
     34       1.4       mrg  * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
     35       1.4       mrg  */
     36       1.7       chs /*
     37       1.7       chs  * uvm_aobj.c: anonymous memory uvm_object pager
     38       1.7       chs  *
     39       1.7       chs  * author: Chuck Silvers <chuq (at) chuq.com>
     40       1.7       chs  * started: Jan-1998
     41       1.7       chs  *
     42       1.7       chs  * - design mostly from Chuck Cranor
     43       1.7       chs  */
     44      1.49     lukem 
     45      1.49     lukem #include <sys/cdefs.h>
     46  1.68.2.7      yamt __KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.68.2.7 2008/03/17 09:15:52 yamt Exp $");
     47       1.7       chs 
     48       1.7       chs #include "opt_uvmhist.h"
     49       1.1       mrg 
     50       1.1       mrg #include <sys/param.h>
     51       1.1       mrg #include <sys/systm.h>
     52       1.1       mrg #include <sys/proc.h>
     53       1.1       mrg #include <sys/malloc.h>
     54      1.37       chs #include <sys/kernel.h>
     55      1.12   thorpej #include <sys/pool.h>
     56       1.1       mrg 
     57       1.1       mrg #include <uvm/uvm.h>
     58       1.1       mrg 
     59       1.1       mrg /*
     60       1.1       mrg  * an aobj manages anonymous-memory backed uvm_objects.   in addition
     61       1.1       mrg  * to keeping the list of resident pages, it also keeps a list of
     62       1.1       mrg  * allocated swap blocks.  depending on the size of the aobj this list
     63       1.1       mrg  * of allocated swap blocks is either stored in an array (small objects)
     64       1.1       mrg  * or in a hash table (large objects).
     65       1.1       mrg  */
     66       1.1       mrg 
     67       1.1       mrg /*
     68       1.1       mrg  * local structures
     69       1.1       mrg  */
     70       1.1       mrg 
     71       1.1       mrg /*
     72       1.1       mrg  * for hash tables, we break the address space of the aobj into blocks
     73       1.1       mrg  * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
     74       1.1       mrg  * be a power of two.
     75       1.1       mrg  */
     76       1.1       mrg 
     77       1.1       mrg #define UAO_SWHASH_CLUSTER_SHIFT 4
     78       1.1       mrg #define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)
     79       1.1       mrg 
     80       1.1       mrg /* get the "tag" for this page index */
     81       1.1       mrg #define UAO_SWHASH_ELT_TAG(PAGEIDX) \
     82       1.1       mrg 	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)
     83       1.1       mrg 
     84  1.68.2.1      yamt #define UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX) \
     85  1.68.2.1      yamt 	((PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1))
     86  1.68.2.1      yamt 
     87       1.1       mrg /* given an ELT and a page index, find the swap slot */
     88       1.1       mrg #define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
     89  1.68.2.1      yamt 	((ELT)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX)])
     90       1.1       mrg 
     91       1.1       mrg /* given an ELT, return its pageidx base */
     92       1.1       mrg #define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
     93       1.1       mrg 	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)
     94       1.1       mrg 
     95       1.1       mrg /*
     96       1.1       mrg  * the swhash hash function
     97       1.1       mrg  */
     98      1.46       chs 
     99       1.1       mrg #define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
    100       1.1       mrg 	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
    101       1.1       mrg 			    & (AOBJ)->u_swhashmask)])
    102       1.1       mrg 
    103       1.1       mrg /*
     104       1.1       mrg  * the swhash threshold determines if we will use an array or a
    105       1.1       mrg  * hash table to store the list of allocated swap blocks.
    106       1.1       mrg  */
    107       1.1       mrg 
    108       1.1       mrg #define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
    109       1.1       mrg #define UAO_USES_SWHASH(AOBJ) \
    110       1.1       mrg 	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */
    111       1.1       mrg 
    112       1.1       mrg /*
    113       1.3       chs  * the number of buckets in a swhash, with an upper bound
    114       1.1       mrg  */
    115      1.46       chs 
    116       1.1       mrg #define UAO_SWHASH_MAXBUCKETS 256
    117       1.1       mrg #define UAO_SWHASH_BUCKETS(AOBJ) \
    118      1.46       chs 	(MIN((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
    119       1.1       mrg 	     UAO_SWHASH_MAXBUCKETS))
    120       1.1       mrg 
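/*
 * Editorial worked example (not part of the original file): with
 * UAO_SWHASH_CLUSTER_SHIFT == 4, the macros above decompose page index
 * 0x123 (291) as follows:
 *
 *	UAO_SWHASH_ELT_TAG(0x123)          == 0x123 >> 4  == 0x12
 *	UAO_SWHASH_ELT_PAGESLOT_IDX(0x123) == 0x123 & 0xf == 0x3
 *	UAO_SWHASH_HASH(aobj, 0x123)       == &aobj->u_swhash[0x12 & aobj->u_swhashmask]
 *
 * an aobj of 0x123 pages exceeds UAO_SWHASH_THRESHOLD (64 pages), so it
 * uses the hash table and asks hashinit() for MIN(0x123 >> 4, 256) == 18
 * buckets (hashinit itself chooses the final table size and mask).
 */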
    121       1.1       mrg 
    122       1.1       mrg /*
    123       1.1       mrg  * uao_swhash_elt: when a hash table is being used, this structure defines
    124       1.1       mrg  * the format of an entry in the bucket list.
    125       1.1       mrg  */
    126       1.1       mrg 
    127       1.1       mrg struct uao_swhash_elt {
    128       1.5       mrg 	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
    129      1.28    kleink 	voff_t tag;				/* our 'tag' */
    130       1.5       mrg 	int count;				/* our number of active slots */
    131       1.5       mrg 	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
    132       1.1       mrg };
    133       1.1       mrg 
    134       1.1       mrg /*
    135       1.1       mrg  * uao_swhash: the swap hash table structure
    136       1.1       mrg  */
    137       1.1       mrg 
    138       1.1       mrg LIST_HEAD(uao_swhash, uao_swhash_elt);
    139       1.1       mrg 
    140      1.12   thorpej /*
    141      1.12   thorpej  * uao_swhash_elt_pool: pool of uao_swhash_elt structures
    142      1.64    simonb  * NOTE: Pages for this pool must not come from a pageable kernel map!
    143      1.12   thorpej  */
    144      1.64    simonb POOL_INIT(uao_swhash_elt_pool, sizeof(struct uao_swhash_elt), 0, 0, 0,
    145  1.68.2.4      yamt     "uaoeltpl", NULL, IPL_VM);
    146       1.1       mrg 
    147       1.1       mrg /*
    148       1.1       mrg  * uvm_aobj: the actual anon-backed uvm_object
    149       1.1       mrg  *
     150       1.1       mrg  * => the uvm_object is at the top of the structure, which allows
    151      1.46       chs  *   (struct uvm_aobj *) == (struct uvm_object *)
    152       1.1       mrg  * => only one of u_swslots and u_swhash is used in any given aobj
    153       1.1       mrg  */
    154       1.1       mrg 
    155       1.1       mrg struct uvm_aobj {
    156       1.5       mrg 	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
    157  1.68.2.2      yamt 	pgoff_t u_pages;	 /* number of pages in entire object */
    158       1.5       mrg 	int u_flags;		 /* the flags (see uvm_aobj.h) */
    159       1.5       mrg 	int *u_swslots;		 /* array of offset->swapslot mappings */
    160       1.5       mrg 				 /*
    161       1.5       mrg 				  * hashtable of offset->swapslot mappings
    162       1.5       mrg 				  * (u_swhash is an array of bucket heads)
    163       1.5       mrg 				  */
    164       1.5       mrg 	struct uao_swhash *u_swhash;
    165       1.5       mrg 	u_long u_swhashmask;		/* mask for hashtable */
    166       1.5       mrg 	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
    167       1.1       mrg };
    168       1.1       mrg 
    169       1.1       mrg /*
    170      1.12   thorpej  * uvm_aobj_pool: pool of uvm_aobj structures
    171      1.12   thorpej  */
    172      1.64    simonb POOL_INIT(uvm_aobj_pool, sizeof(struct uvm_aobj), 0, 0, 0, "aobjpl",
    173  1.68.2.4      yamt     &pool_allocator_nointr, IPL_NONE);
    174      1.54   thorpej 
    175      1.54   thorpej MALLOC_DEFINE(M_UVMAOBJ, "UVM aobj", "UVM aobj and related structures");
    176      1.12   thorpej 
    177      1.12   thorpej /*
    178       1.1       mrg  * local functions
    179       1.1       mrg  */
    180       1.1       mrg 
    181      1.62  junyoung static void	uao_free(struct uvm_aobj *);
    182      1.62  junyoung static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
    183      1.62  junyoung 		    int *, int, vm_prot_t, int, int);
    184  1.68.2.3      yamt static int	uao_put(struct uvm_object *, voff_t, voff_t, int);
    185  1.68.2.1      yamt 
    186  1.68.2.1      yamt #if defined(VMSWAP)
    187  1.68.2.1      yamt static struct uao_swhash_elt *uao_find_swhash_elt
    188  1.68.2.3      yamt     (struct uvm_aobj *, int, bool);
    189  1.68.2.1      yamt 
    190  1.68.2.3      yamt static bool uao_pagein(struct uvm_aobj *, int, int);
    191  1.68.2.3      yamt static bool uao_pagein_page(struct uvm_aobj *, int);
    192  1.68.2.1      yamt static void uao_dropswap_range1(struct uvm_aobj *, voff_t, voff_t);
    193  1.68.2.1      yamt #endif /* defined(VMSWAP) */
    194       1.1       mrg 
    195       1.1       mrg /*
    196       1.1       mrg  * aobj_pager
    197      1.41       chs  *
    198       1.1       mrg  * note that some functions (e.g. put) are handled elsewhere
    199       1.1       mrg  */
    200       1.1       mrg 
    201  1.68.2.5      yamt const struct uvm_pagerops aobj_pager = {
    202  1.68.2.5      yamt 	.pgo_reference = uao_reference,
    203  1.68.2.5      yamt 	.pgo_detach = uao_detach,
    204  1.68.2.5      yamt 	.pgo_get = uao_get,
    205  1.68.2.5      yamt 	.pgo_put = uao_put,
    206       1.1       mrg };
    207       1.1       mrg 
    208       1.1       mrg /*
    209       1.1       mrg  * uao_list: global list of active aobjs, locked by uao_list_lock
    210       1.1       mrg  */
    211       1.1       mrg 
    212       1.1       mrg static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
    213  1.68.2.4      yamt static kmutex_t uao_list_lock;
    214       1.1       mrg 
    215       1.1       mrg /*
    216       1.1       mrg  * functions
    217       1.1       mrg  */
    218       1.1       mrg 
    219       1.1       mrg /*
    220       1.1       mrg  * hash table/array related functions
    221       1.1       mrg  */
    222       1.1       mrg 
    223  1.68.2.1      yamt #if defined(VMSWAP)
    224  1.68.2.1      yamt 
    225       1.1       mrg /*
    226       1.1       mrg  * uao_find_swhash_elt: find (or create) a hash table entry for a page
    227       1.1       mrg  * offset.
    228       1.1       mrg  *
    229       1.1       mrg  * => the object should be locked by the caller
    230       1.1       mrg  */
    231       1.1       mrg 
    232       1.5       mrg static struct uao_swhash_elt *
    233  1.68.2.3      yamt uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
    234       1.5       mrg {
    235       1.5       mrg 	struct uao_swhash *swhash;
    236       1.5       mrg 	struct uao_swhash_elt *elt;
    237      1.28    kleink 	voff_t page_tag;
    238       1.1       mrg 
    239      1.45       chs 	swhash = UAO_SWHASH_HASH(aobj, pageidx);
    240      1.45       chs 	page_tag = UAO_SWHASH_ELT_TAG(pageidx);
    241       1.1       mrg 
    242       1.5       mrg 	/*
    243       1.5       mrg 	 * now search the bucket for the requested tag
    244       1.5       mrg 	 */
    245      1.45       chs 
    246      1.37       chs 	LIST_FOREACH(elt, swhash, list) {
    247      1.45       chs 		if (elt->tag == page_tag) {
    248      1.45       chs 			return elt;
    249      1.45       chs 		}
    250       1.5       mrg 	}
    251      1.45       chs 	if (!create) {
    252       1.5       mrg 		return NULL;
    253      1.45       chs 	}
    254       1.5       mrg 
    255       1.5       mrg 	/*
    256      1.12   thorpej 	 * allocate a new entry for the bucket and init/insert it in
    257       1.5       mrg 	 */
    258      1.45       chs 
    259      1.45       chs 	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
    260      1.45       chs 	if (elt == NULL) {
    261      1.45       chs 		return NULL;
    262      1.45       chs 	}
    263       1.5       mrg 	LIST_INSERT_HEAD(swhash, elt, list);
    264       1.5       mrg 	elt->tag = page_tag;
    265       1.5       mrg 	elt->count = 0;
    266       1.9     perry 	memset(elt->slots, 0, sizeof(elt->slots));
    267      1.45       chs 	return elt;
    268       1.1       mrg }
    269       1.1       mrg 
    270       1.1       mrg /*
    271       1.1       mrg  * uao_find_swslot: find the swap slot number for an aobj/pageidx
    272       1.1       mrg  *
    273      1.41       chs  * => object must be locked by caller
    274       1.1       mrg  */
    275      1.46       chs 
    276      1.46       chs int
    277      1.67   thorpej uao_find_swslot(struct uvm_object *uobj, int pageidx)
    278       1.1       mrg {
    279      1.46       chs 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    280      1.46       chs 	struct uao_swhash_elt *elt;
    281       1.1       mrg 
    282       1.5       mrg 	/*
    283       1.5       mrg 	 * if noswap flag is set, then we never return a slot
    284       1.5       mrg 	 */
    285       1.1       mrg 
    286       1.5       mrg 	if (aobj->u_flags & UAO_FLAG_NOSWAP)
    287       1.5       mrg 		return(0);
    288       1.1       mrg 
    289       1.5       mrg 	/*
    290       1.5       mrg 	 * if hashing, look in hash table.
    291       1.5       mrg 	 */
    292       1.1       mrg 
    293       1.5       mrg 	if (UAO_USES_SWHASH(aobj)) {
    294  1.68.2.3      yamt 		elt = uao_find_swhash_elt(aobj, pageidx, false);
    295       1.5       mrg 		if (elt)
    296       1.5       mrg 			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
    297       1.5       mrg 		else
    298      1.31   thorpej 			return(0);
    299       1.5       mrg 	}
    300       1.1       mrg 
    301      1.41       chs 	/*
    302       1.5       mrg 	 * otherwise, look in the array
    303       1.5       mrg 	 */
    304      1.46       chs 
    305       1.5       mrg 	return(aobj->u_swslots[pageidx]);
    306       1.1       mrg }
    307       1.1       mrg 
    308       1.1       mrg /*
    309       1.1       mrg  * uao_set_swslot: set the swap slot for a page in an aobj.
    310       1.1       mrg  *
    311       1.1       mrg  * => setting a slot to zero frees the slot
    312       1.1       mrg  * => object must be locked by caller
    313      1.45       chs  * => we return the old slot number, or -1 if we failed to allocate
    314      1.45       chs  *    memory to record the new slot number
    315       1.1       mrg  */
    316      1.46       chs 
    317       1.5       mrg int
    318      1.67   thorpej uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
    319       1.5       mrg {
    320       1.5       mrg 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    321      1.45       chs 	struct uao_swhash_elt *elt;
    322       1.5       mrg 	int oldslot;
    323       1.5       mrg 	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
    324       1.5       mrg 	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
    325       1.5       mrg 	    aobj, pageidx, slot, 0);
    326       1.1       mrg 
    327       1.5       mrg 	/*
    328      1.46       chs 	 * if noswap flag is set, then we can't set a non-zero slot.
    329       1.5       mrg 	 */
    330       1.1       mrg 
    331       1.5       mrg 	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
    332       1.5       mrg 		if (slot == 0)
    333      1.46       chs 			return(0);
    334       1.1       mrg 
    335       1.5       mrg 		printf("uao_set_swslot: uobj = %p\n", uobj);
    336      1.46       chs 		panic("uao_set_swslot: NOSWAP object");
    337       1.5       mrg 	}
    338       1.1       mrg 
    339       1.5       mrg 	/*
    340       1.5       mrg 	 * are we using a hash table?  if so, add it in the hash.
    341       1.5       mrg 	 */
    342       1.1       mrg 
    343       1.5       mrg 	if (UAO_USES_SWHASH(aobj)) {
    344      1.39       chs 
    345      1.12   thorpej 		/*
    346      1.12   thorpej 		 * Avoid allocating an entry just to free it again if
     347      1.12   thorpej 		 * the page had no swap slot in the first place, and
    348      1.12   thorpej 		 * we are freeing.
    349      1.12   thorpej 		 */
    350      1.39       chs 
    351      1.46       chs 		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
    352      1.12   thorpej 		if (elt == NULL) {
    353      1.45       chs 			return slot ? -1 : 0;
    354      1.12   thorpej 		}
    355       1.5       mrg 
    356       1.5       mrg 		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
    357       1.5       mrg 		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;
    358       1.5       mrg 
    359       1.5       mrg 		/*
    360       1.5       mrg 		 * now adjust the elt's reference counter and free it if we've
    361       1.5       mrg 		 * dropped it to zero.
    362       1.5       mrg 		 */
    363       1.5       mrg 
    364       1.5       mrg 		if (slot) {
    365       1.5       mrg 			if (oldslot == 0)
    366       1.5       mrg 				elt->count++;
    367      1.45       chs 		} else {
    368      1.45       chs 			if (oldslot)
    369       1.5       mrg 				elt->count--;
    370       1.5       mrg 
    371       1.5       mrg 			if (elt->count == 0) {
    372       1.5       mrg 				LIST_REMOVE(elt, list);
    373      1.12   thorpej 				pool_put(&uao_swhash_elt_pool, elt);
    374       1.5       mrg 			}
    375       1.5       mrg 		}
    376      1.41       chs 	} else {
    377       1.5       mrg 		/* we are using an array */
    378       1.5       mrg 		oldslot = aobj->u_swslots[pageidx];
    379       1.5       mrg 		aobj->u_swslots[pageidx] = slot;
    380       1.5       mrg 	}
    381       1.5       mrg 	return (oldslot);
    382       1.1       mrg }
    383       1.1       mrg 
    384  1.68.2.1      yamt #endif /* defined(VMSWAP) */
    385  1.68.2.1      yamt 
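#if 0
/*
 * Editorial sketch, not part of the original file: the caller protocol for
 * uao_set_swslot() described above, assuming a kernel built with VMSWAP.
 * The function name and the "uobj"/"pageidx"/"newslot" parameters are
 * hypothetical.  The object lock must be held across the call; a return of
 * -1 means the hash entry needed to record the new slot could not be
 * allocated.
 */
static int
uao_set_swslot_example(struct uvm_object *uobj, int pageidx, int newslot)
{
	int oldslot;

	mutex_enter(&uobj->vmobjlock);
	oldslot = uao_set_swslot(uobj, pageidx, newslot);
	mutex_exit(&uobj->vmobjlock);
	if (oldslot == -1) {
		/* no memory for a hash entry; nothing was recorded */
		return ENOMEM;
	}
	if (oldslot > 0) {
		/* we replaced an existing slot; release it to the swap pool */
		uvm_swap_free(oldslot, 1);
	}
	return 0;
}
#endif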
    386       1.1       mrg /*
    387       1.1       mrg  * end of hash/array functions
    388       1.1       mrg  */
    389       1.1       mrg 
    390       1.1       mrg /*
    391       1.1       mrg  * uao_free: free all resources held by an aobj, and then free the aobj
    392       1.1       mrg  *
    393       1.1       mrg  * => the aobj should be dead
    394       1.1       mrg  */
    395      1.46       chs 
    396       1.1       mrg static void
    397      1.67   thorpej uao_free(struct uvm_aobj *aobj)
    398       1.1       mrg {
    399      1.46       chs 	int swpgonlydelta = 0;
    400       1.1       mrg 
    401  1.68.2.6      yamt 
    402  1.68.2.1      yamt #if defined(VMSWAP)
    403  1.68.2.1      yamt 	uao_dropswap_range1(aobj, 0, 0);
    404  1.68.2.4      yamt #endif /* defined(VMSWAP) */
    405  1.68.2.4      yamt 
    406  1.68.2.6      yamt 	mutex_exit(&aobj->u_obj.vmobjlock);
    407  1.68.2.1      yamt 
    408  1.68.2.4      yamt #if defined(VMSWAP)
    409       1.5       mrg 	if (UAO_USES_SWHASH(aobj)) {
    410       1.1       mrg 
    411       1.5       mrg 		/*
    412  1.68.2.1      yamt 		 * free the hash table itself.
    413       1.5       mrg 		 */
    414      1.46       chs 
    415      1.34   thorpej 		free(aobj->u_swhash, M_UVMAOBJ);
    416       1.5       mrg 	} else {
    417       1.5       mrg 
    418       1.5       mrg 		/*
     419  1.68.2.1      yamt 		 * free the array itself.
    420       1.5       mrg 		 */
    421       1.5       mrg 
    422      1.34   thorpej 		free(aobj->u_swslots, M_UVMAOBJ);
    423       1.1       mrg 	}
    424  1.68.2.1      yamt #endif /* defined(VMSWAP) */
    425       1.1       mrg 
    426       1.5       mrg 	/*
    427       1.5       mrg 	 * finally free the aobj itself
    428       1.5       mrg 	 */
    429      1.46       chs 
    430  1.68.2.6      yamt 	UVM_OBJ_DESTROY(&aobj->u_obj);
    431      1.12   thorpej 	pool_put(&uvm_aobj_pool, aobj);
    432      1.46       chs 
    433      1.46       chs 	/*
    434      1.46       chs 	 * adjust the counter of pages only in swap for all
    435      1.46       chs 	 * the swap slots we've freed.
    436      1.46       chs 	 */
    437      1.46       chs 
    438      1.48       chs 	if (swpgonlydelta > 0) {
    439  1.68.2.4      yamt 		mutex_enter(&uvm_swap_data_lock);
    440      1.48       chs 		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
    441      1.48       chs 		uvmexp.swpgonly -= swpgonlydelta;
    442  1.68.2.4      yamt 		mutex_exit(&uvm_swap_data_lock);
    443      1.48       chs 	}
    444       1.1       mrg }
    445       1.1       mrg 
    446       1.1       mrg /*
    447       1.1       mrg  * pager functions
    448       1.1       mrg  */
    449       1.1       mrg 
    450       1.1       mrg /*
    451       1.1       mrg  * uao_create: create an aobj of the given size and return its uvm_object.
    452       1.1       mrg  *
    453       1.1       mrg  * => for normal use, flags are always zero
    454       1.1       mrg  * => for the kernel object, the flags are:
    455       1.1       mrg  *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
    456       1.1       mrg  *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
    457       1.1       mrg  */
    458      1.46       chs 
    459       1.5       mrg struct uvm_object *
    460      1.67   thorpej uao_create(vsize_t size, int flags)
    461       1.5       mrg {
    462      1.46       chs 	static struct uvm_aobj kernel_object_store;
    463      1.46       chs 	static int kobj_alloced = 0;
    464  1.68.2.2      yamt 	pgoff_t pages = round_page(size) >> PAGE_SHIFT;
    465       1.5       mrg 	struct uvm_aobj *aobj;
    466      1.66      yamt 	int refs;
    467       1.1       mrg 
    468       1.5       mrg 	/*
    469      1.27       chs 	 * malloc a new aobj unless we are asked for the kernel object
    470      1.27       chs 	 */
    471       1.5       mrg 
    472      1.46       chs 	if (flags & UAO_FLAG_KERNOBJ) {
    473      1.46       chs 		KASSERT(!kobj_alloced);
    474       1.5       mrg 		aobj = &kernel_object_store;
    475       1.5       mrg 		aobj->u_pages = pages;
    476      1.46       chs 		aobj->u_flags = UAO_FLAG_NOSWAP;
    477      1.66      yamt 		refs = UVM_OBJ_KERN;
    478       1.5       mrg 		kobj_alloced = UAO_FLAG_KERNOBJ;
    479       1.5       mrg 	} else if (flags & UAO_FLAG_KERNSWAP) {
    480      1.46       chs 		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
    481       1.5       mrg 		aobj = &kernel_object_store;
    482       1.5       mrg 		kobj_alloced = UAO_FLAG_KERNSWAP;
    483      1.66      yamt 		refs = 0xdeadbeaf; /* XXX: gcc */
    484      1.46       chs 	} else {
    485      1.12   thorpej 		aobj = pool_get(&uvm_aobj_pool, PR_WAITOK);
    486       1.5       mrg 		aobj->u_pages = pages;
    487      1.46       chs 		aobj->u_flags = 0;
    488      1.66      yamt 		refs = 1;
    489       1.5       mrg 	}
    490       1.1       mrg 
    491       1.5       mrg 	/*
    492       1.5       mrg  	 * allocate hash/array if necessary
    493       1.5       mrg  	 *
    494       1.5       mrg  	 * note: in the KERNSWAP case no need to worry about locking since
     495       1.5       mrg  	 * we are still booting and should be the only thread around.
    496       1.5       mrg  	 */
    497      1.46       chs 
    498       1.5       mrg 	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
    499  1.68.2.1      yamt #if defined(VMSWAP)
    500       1.5       mrg 		int mflags = (flags & UAO_FLAG_KERNSWAP) != 0 ?
    501       1.5       mrg 		    M_NOWAIT : M_WAITOK;
    502       1.5       mrg 
    503       1.5       mrg 		/* allocate hash table or array depending on object size */
    504      1.27       chs 		if (UAO_USES_SWHASH(aobj)) {
    505       1.5       mrg 			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
    506      1.35        ad 			    HASH_LIST, M_UVMAOBJ, mflags, &aobj->u_swhashmask);
    507       1.5       mrg 			if (aobj->u_swhash == NULL)
    508       1.5       mrg 				panic("uao_create: hashinit swhash failed");
    509       1.5       mrg 		} else {
    510      1.34   thorpej 			aobj->u_swslots = malloc(pages * sizeof(int),
    511       1.5       mrg 			    M_UVMAOBJ, mflags);
    512       1.5       mrg 			if (aobj->u_swslots == NULL)
    513       1.5       mrg 				panic("uao_create: malloc swslots failed");
    514       1.9     perry 			memset(aobj->u_swslots, 0, pages * sizeof(int));
    515       1.5       mrg 		}
    516  1.68.2.1      yamt #endif /* defined(VMSWAP) */
    517       1.5       mrg 
    518       1.5       mrg 		if (flags) {
    519       1.5       mrg 			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
    520       1.5       mrg 			return(&aobj->u_obj);
    521       1.5       mrg 		}
    522       1.5       mrg 	}
    523       1.5       mrg 
    524       1.5       mrg 	/*
    525       1.5       mrg  	 * init aobj fields
    526       1.5       mrg  	 */
    527      1.46       chs 
    528      1.66      yamt 	UVM_OBJ_INIT(&aobj->u_obj, &aobj_pager, refs);
    529       1.1       mrg 
    530       1.5       mrg 	/*
    531       1.5       mrg  	 * now that aobj is ready, add it to the global list
    532       1.5       mrg  	 */
    533      1.46       chs 
    534  1.68.2.4      yamt 	mutex_enter(&uao_list_lock);
    535       1.5       mrg 	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
    536  1.68.2.4      yamt 	mutex_exit(&uao_list_lock);
    537       1.5       mrg 	return(&aobj->u_obj);
    538       1.1       mrg }
    539       1.1       mrg 
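#if 0
/*
 * Editorial sketch, not part of the original file: the two-step kernel
 * object bring-up that UAO_FLAG_KERNOBJ/UAO_FLAG_KERNSWAP support.  The
 * real call sites live elsewhere in UVM (kernel map setup and swap
 * initialization); the function name, local variable and size expression
 * here are illustrative stand-ins only.
 */
static void
uao_kernobj_example(void)
{
	struct uvm_object *kobj;

	/* early in bootstrap, before any swap is configured: */
	kobj = uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNOBJ);

	/* later, once swapping can be enabled for the kernel object: */
	(void)uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);

	(void)kobj;
}
#endif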
    540       1.1       mrg 
    541       1.1       mrg 
    542       1.1       mrg /*
    543       1.1       mrg  * uao_init: set up aobj pager subsystem
    544       1.1       mrg  *
    545       1.1       mrg  * => called at boot time from uvm_pager_init()
    546       1.1       mrg  */
    547      1.46       chs 
    548      1.27       chs void
    549      1.46       chs uao_init(void)
    550       1.5       mrg {
    551      1.12   thorpej 	static int uao_initialized;
    552      1.12   thorpej 
    553      1.12   thorpej 	if (uao_initialized)
    554      1.12   thorpej 		return;
    555  1.68.2.3      yamt 	uao_initialized = true;
    556       1.5       mrg 	LIST_INIT(&uao_list);
    557  1.68.2.6      yamt 	mutex_init(&uao_list_lock, MUTEX_DEFAULT, IPL_NONE);
    558       1.1       mrg }
    559       1.1       mrg 
    560       1.1       mrg /*
    561       1.1       mrg  * uao_reference: add a ref to an aobj
    562       1.1       mrg  *
    563      1.27       chs  * => aobj must be unlocked
    564      1.27       chs  * => just lock it and call the locked version
    565       1.1       mrg  */
    566      1.46       chs 
    567       1.5       mrg void
    568      1.67   thorpej uao_reference(struct uvm_object *uobj)
    569       1.1       mrg {
    570  1.68.2.6      yamt 	mutex_enter(&uobj->vmobjlock);
    571      1.27       chs 	uao_reference_locked(uobj);
    572  1.68.2.6      yamt 	mutex_exit(&uobj->vmobjlock);
    573      1.27       chs }
    574      1.27       chs 
    575      1.27       chs /*
    576      1.27       chs  * uao_reference_locked: add a ref to an aobj that is already locked
    577      1.27       chs  *
    578      1.27       chs  * => aobj must be locked
    579      1.27       chs  * this needs to be separate from the normal routine
    580      1.27       chs  * since sometimes we need to add a reference to an aobj when
    581      1.27       chs  * it's already locked.
    582      1.27       chs  */
    583      1.46       chs 
    584      1.27       chs void
    585      1.67   thorpej uao_reference_locked(struct uvm_object *uobj)
    586      1.27       chs {
    587       1.5       mrg 	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);
    588       1.1       mrg 
    589       1.5       mrg 	/*
    590       1.5       mrg  	 * kernel_object already has plenty of references, leave it alone.
    591       1.5       mrg  	 */
    592       1.1       mrg 
    593      1.20   thorpej 	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
    594       1.5       mrg 		return;
    595       1.1       mrg 
    596      1.46       chs 	uobj->uo_refs++;
    597      1.41       chs 	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
    598      1.27       chs 		    uobj, uobj->uo_refs,0,0);
    599       1.1       mrg }
    600       1.1       mrg 
    601       1.1       mrg /*
    602       1.1       mrg  * uao_detach: drop a reference to an aobj
    603       1.1       mrg  *
    604      1.27       chs  * => aobj must be unlocked
    605      1.27       chs  * => just lock it and call the locked version
    606       1.1       mrg  */
    607      1.46       chs 
    608       1.5       mrg void
    609      1.67   thorpej uao_detach(struct uvm_object *uobj)
    610       1.5       mrg {
    611  1.68.2.6      yamt 	mutex_enter(&uobj->vmobjlock);
    612      1.27       chs 	uao_detach_locked(uobj);
    613      1.27       chs }
    614      1.27       chs 
    615      1.27       chs /*
    616      1.27       chs  * uao_detach_locked: drop a reference to an aobj
    617      1.27       chs  *
    618      1.27       chs  * => aobj must be locked, and is unlocked (or freed) upon return.
    619      1.27       chs  * this needs to be separate from the normal routine
    620      1.27       chs  * since sometimes we need to detach from an aobj when
    621      1.27       chs  * it's already locked.
    622      1.27       chs  */
    623      1.46       chs 
    624      1.27       chs void
    625      1.67   thorpej uao_detach_locked(struct uvm_object *uobj)
    626      1.27       chs {
    627       1.5       mrg 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    628      1.46       chs 	struct vm_page *pg;
    629       1.5       mrg 	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);
    630       1.1       mrg 
    631       1.5       mrg 	/*
    632       1.5       mrg  	 * detaching from kernel_object is a noop.
    633       1.5       mrg  	 */
    634      1.46       chs 
    635      1.27       chs 	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
    636  1.68.2.6      yamt 		mutex_exit(&uobj->vmobjlock);
    637       1.5       mrg 		return;
    638      1.27       chs 	}
    639       1.5       mrg 
    640       1.5       mrg 	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
    641      1.46       chs 	uobj->uo_refs--;
    642      1.46       chs 	if (uobj->uo_refs) {
    643  1.68.2.6      yamt 		mutex_exit(&uobj->vmobjlock);
    644       1.5       mrg 		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
    645       1.5       mrg 		return;
    646       1.5       mrg 	}
    647       1.5       mrg 
    648       1.5       mrg 	/*
    649       1.5       mrg  	 * remove the aobj from the global list.
    650       1.5       mrg  	 */
    651      1.46       chs 
    652  1.68.2.4      yamt 	mutex_enter(&uao_list_lock);
    653       1.5       mrg 	LIST_REMOVE(aobj, u_list);
    654  1.68.2.4      yamt 	mutex_exit(&uao_list_lock);
    655       1.5       mrg 
    656       1.5       mrg 	/*
    657      1.46       chs  	 * free all the pages left in the aobj.  for each page,
    658      1.46       chs 	 * when the page is no longer busy (and thus after any disk i/o that
    659      1.46       chs 	 * it's involved in is complete), release any swap resources and
    660      1.46       chs 	 * free the page itself.
    661       1.5       mrg  	 */
    662      1.46       chs 
    663  1.68.2.6      yamt 	mutex_enter(&uvm_pageqlock);
    664      1.46       chs 	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
    665      1.46       chs 		pmap_page_protect(pg, VM_PROT_NONE);
    666       1.5       mrg 		if (pg->flags & PG_BUSY) {
    667      1.46       chs 			pg->flags |= PG_WANTED;
    668  1.68.2.6      yamt 			mutex_exit(&uvm_pageqlock);
    669  1.68.2.3      yamt 			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, false,
    670      1.46       chs 			    "uao_det", 0);
    671  1.68.2.6      yamt 			mutex_enter(&uobj->vmobjlock);
    672  1.68.2.6      yamt 			mutex_enter(&uvm_pageqlock);
    673       1.5       mrg 			continue;
    674       1.5       mrg 		}
    675      1.18       chs 		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
    676       1.5       mrg 		uvm_pagefree(pg);
    677       1.5       mrg 	}
    678  1.68.2.6      yamt 	mutex_exit(&uvm_pageqlock);
    679       1.1       mrg 
    680       1.5       mrg 	/*
    681      1.46       chs  	 * finally, free the aobj itself.
    682       1.5       mrg  	 */
    683       1.1       mrg 
    684       1.5       mrg 	uao_free(aobj);
    685       1.5       mrg }
    686       1.1       mrg 
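#if 0
/*
 * Editorial sketch, not part of the original file: the reference life
 * cycle of an anonymous object as seen by a hypothetical client.
 * uao_create() hands back the object with one reference; uao_reference()
 * and uao_detach() adjust it, and the final detach releases the pages and
 * swap slots and frees the aobj via uao_free().
 */
static void
uao_lifecycle_example(void)
{
	struct uvm_object *uobj;

	uobj = uao_create(16 * PAGE_SIZE, 0);	/* reference count == 1 */
	uao_reference(uobj);			/* reference count == 2 */
	uao_detach(uobj);			/* reference count == 1 */
	uao_detach(uobj);			/* last reference: object freed */
}
#endif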
    687       1.1       mrg /*
    688      1.46       chs  * uao_put: flush pages out of a uvm object
    689      1.22   thorpej  *
    690      1.22   thorpej  * => object should be locked by caller.  we may _unlock_ the object
    691      1.22   thorpej  *	if (and only if) we need to clean a page (PGO_CLEANIT).
    692      1.22   thorpej  *	XXXJRT Currently, however, we don't.  In the case of cleaning
    693      1.22   thorpej  *	XXXJRT a page, we simply just deactivate it.  Should probably
    694      1.22   thorpej  *	XXXJRT handle this better, in the future (although "flushing"
    695      1.22   thorpej  *	XXXJRT anonymous memory isn't terribly important).
    696      1.22   thorpej  * => if PGO_CLEANIT is not set, then we will neither unlock the object
     697      1.22   thorpej  *	nor block.
     698      1.22   thorpej  * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
    699      1.22   thorpej  *	for flushing.
    700      1.22   thorpej  * => NOTE: we rely on the fact that the object's memq is a TAILQ and
    701      1.22   thorpej  *	that new pages are inserted on the tail end of the list.  thus,
    702      1.22   thorpej  *	we can make a complete pass through the object in one go by starting
    703      1.22   thorpej  *	at the head and working towards the tail (new pages are put in
    704      1.22   thorpej  *	front of us).
    705      1.22   thorpej  * => NOTE: we are allowed to lock the page queues, so the caller
    706      1.22   thorpej  *	must not be holding the lock on them [e.g. pagedaemon had
    707      1.22   thorpej  *	better not call us with the queues locked]
    708  1.68.2.3      yamt  * => we return 0 unless we encountered some sort of I/O error
    709      1.22   thorpej  *	XXXJRT currently never happens, as we never directly initiate
    710      1.22   thorpej  *	XXXJRT I/O
    711      1.22   thorpej  *
    712      1.22   thorpej  * note on page traversal:
    713      1.22   thorpej  *	we can traverse the pages in an object either by going down the
    714      1.22   thorpej  *	linked list in "uobj->memq", or we can go over the address range
    715      1.22   thorpej  *	by page doing hash table lookups for each address.  depending
    716      1.22   thorpej  *	on how many pages are in the object it may be cheaper to do one
    717      1.22   thorpej  *	or the other.  we set "by_list" to true if we are using memq.
    718      1.22   thorpej  *	if the cost of a hash lookup was equal to the cost of the list
    719      1.22   thorpej  *	traversal we could compare the number of pages in the start->stop
    720      1.22   thorpej  *	range to the total number of pages in the object.  however, it
    721      1.22   thorpej  *	seems that a hash table lookup is more expensive than the linked
    722      1.22   thorpej  *	list traversal, so we multiply the number of pages in the
    723      1.22   thorpej  *	start->stop range by a penalty which we define below.
    724       1.1       mrg  */
    725      1.22   thorpej 
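/*
 * Editorial worked example of the heuristic above (assuming a penalty of
 * 4, the historical value of UVM_PAGE_HASH_PENALTY): flushing a 16-page
 * range of an object holding 100 resident pages goes by per-page lookups
 * (100 > 16 * 4), while the same range of an object holding 50 resident
 * pages is done by walking memq (50 <= 64).
 */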
    726      1.68   thorpej static int
    727      1.67   thorpej uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
    728       1.5       mrg {
    729      1.46       chs 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    730      1.51     enami 	struct vm_page *pg, *nextpg, curmp, endmp;
    731  1.68.2.3      yamt 	bool by_list;
    732      1.28    kleink 	voff_t curoff;
    733      1.46       chs 	UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);
    734      1.22   thorpej 
    735  1.68.2.6      yamt 	KASSERT(mutex_owned(&uobj->vmobjlock));
    736  1.68.2.6      yamt 
    737      1.46       chs 	curoff = 0;
    738      1.22   thorpej 	if (flags & PGO_ALLPAGES) {
    739      1.22   thorpej 		start = 0;
    740      1.22   thorpej 		stop = aobj->u_pages << PAGE_SHIFT;
    741  1.68.2.3      yamt 		by_list = true;		/* always go by the list */
    742      1.22   thorpej 	} else {
    743      1.22   thorpej 		start = trunc_page(start);
    744  1.68.2.1      yamt 		if (stop == 0) {
    745  1.68.2.1      yamt 			stop = aobj->u_pages << PAGE_SHIFT;
    746  1.68.2.1      yamt 		} else {
    747  1.68.2.1      yamt 			stop = round_page(stop);
    748  1.68.2.1      yamt 		}
    749      1.22   thorpej 		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
     750      1.22   thorpej 			printf("uao_put: strange, got an out of range "
    751      1.22   thorpej 			    "flush (fixed)\n");
    752      1.22   thorpej 			stop = aobj->u_pages << PAGE_SHIFT;
    753      1.22   thorpej 		}
    754      1.22   thorpej 		by_list = (uobj->uo_npages <=
    755      1.46       chs 		    ((stop - start) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
    756      1.22   thorpej 	}
    757      1.22   thorpej 	UVMHIST_LOG(maphist,
    758      1.22   thorpej 	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
    759      1.22   thorpej 	    start, stop, by_list, flags);
    760       1.1       mrg 
    761       1.5       mrg 	/*
    762      1.22   thorpej 	 * Don't need to do any work here if we're not freeing
    763      1.22   thorpej 	 * or deactivating pages.
    764      1.22   thorpej 	 */
    765      1.46       chs 
    766      1.22   thorpej 	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
    767  1.68.2.6      yamt 		mutex_exit(&uobj->vmobjlock);
    768      1.46       chs 		return 0;
    769      1.22   thorpej 	}
    770      1.22   thorpej 
    771       1.5       mrg 	/*
    772      1.51     enami 	 * Initialize the marker pages.  See the comment in
    773      1.51     enami 	 * genfs_putpages() also.
    774      1.51     enami 	 */
    775      1.51     enami 
    776      1.51     enami 	curmp.uobject = uobj;
    777      1.51     enami 	curmp.offset = (voff_t)-1;
    778      1.51     enami 	curmp.flags = PG_BUSY;
    779      1.51     enami 	endmp.uobject = uobj;
    780      1.51     enami 	endmp.offset = (voff_t)-1;
    781      1.51     enami 	endmp.flags = PG_BUSY;
    782      1.51     enami 
    783      1.51     enami 	/*
     784      1.46       chs 	 * now do it.  note: we must update nextpg in the body of the loop or we
    785      1.51     enami 	 * will get stuck.  we need to use nextpg if we'll traverse the list
    786      1.51     enami 	 * because we may free "pg" before doing the next loop.
    787      1.21   thorpej 	 */
    788      1.22   thorpej 
    789      1.22   thorpej 	if (by_list) {
    790      1.51     enami 		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
    791      1.51     enami 		nextpg = TAILQ_FIRST(&uobj->memq);
    792  1.68.2.4      yamt 		uvm_lwp_hold(curlwp);
    793      1.22   thorpej 	} else {
    794      1.22   thorpej 		curoff = start;
    795      1.52       scw 		nextpg = NULL;	/* Quell compiler warning */
    796      1.22   thorpej 	}
    797      1.22   thorpej 
    798  1.68.2.7      yamt 	/* locked: uobj */
    799      1.51     enami 	for (;;) {
    800      1.22   thorpej 		if (by_list) {
    801      1.51     enami 			pg = nextpg;
    802      1.51     enami 			if (pg == &endmp)
    803      1.51     enami 				break;
    804      1.46       chs 			nextpg = TAILQ_NEXT(pg, listq);
    805      1.46       chs 			if (pg->offset < start || pg->offset >= stop)
    806      1.22   thorpej 				continue;
    807      1.22   thorpej 		} else {
    808      1.51     enami 			if (curoff < stop) {
    809      1.51     enami 				pg = uvm_pagelookup(uobj, curoff);
    810      1.51     enami 				curoff += PAGE_SIZE;
    811      1.51     enami 			} else
    812      1.51     enami 				break;
    813      1.46       chs 			if (pg == NULL)
    814      1.22   thorpej 				continue;
    815      1.22   thorpej 		}
    816  1.68.2.7      yamt 
    817  1.68.2.7      yamt 		/*
    818  1.68.2.7      yamt 		 * wait and try again if the page is busy.
    819  1.68.2.7      yamt 		 */
    820  1.68.2.7      yamt 
    821  1.68.2.7      yamt 		if (pg->flags & PG_BUSY) {
    822  1.68.2.7      yamt 			if (by_list) {
    823  1.68.2.7      yamt 				TAILQ_INSERT_BEFORE(pg, &curmp, listq);
    824  1.68.2.7      yamt 			}
    825  1.68.2.7      yamt 			pg->flags |= PG_WANTED;
    826  1.68.2.7      yamt 			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
    827  1.68.2.7      yamt 			    "uao_put", 0);
    828  1.68.2.7      yamt 			mutex_enter(&uobj->vmobjlock);
    829  1.68.2.7      yamt 			if (by_list) {
    830  1.68.2.7      yamt 				nextpg = TAILQ_NEXT(&curmp, listq);
    831  1.68.2.7      yamt 				TAILQ_REMOVE(&uobj->memq, &curmp,
    832  1.68.2.7      yamt 				    listq);
    833  1.68.2.7      yamt 			} else
    834  1.68.2.7      yamt 				curoff -= PAGE_SIZE;
    835  1.68.2.7      yamt 			continue;
    836  1.68.2.7      yamt 		}
    837  1.68.2.7      yamt 
    838      1.46       chs 		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
    839      1.41       chs 
    840      1.22   thorpej 		/*
    841      1.22   thorpej 		 * XXX In these first 3 cases, we always just
    842      1.22   thorpej 		 * XXX deactivate the page.  We may want to
    843      1.22   thorpej 		 * XXX handle the different cases more specifically
    844      1.22   thorpej 		 * XXX in the future.
    845      1.22   thorpej 		 */
    846      1.46       chs 
    847      1.22   thorpej 		case PGO_CLEANIT|PGO_FREE:
    848      1.22   thorpej 		case PGO_CLEANIT|PGO_DEACTIVATE:
    849      1.22   thorpej 		case PGO_DEACTIVATE:
    850      1.25   thorpej  deactivate_it:
    851  1.68.2.7      yamt 			mutex_enter(&uvm_pageqlock);
    852  1.68.2.2      yamt 			/* skip the page if it's wired */
    853  1.68.2.7      yamt 			if (pg->wire_count == 0) {
    854  1.68.2.7      yamt 				uvm_pagedeactivate(pg);
    855  1.68.2.7      yamt 			}
    856  1.68.2.7      yamt 			mutex_exit(&uvm_pageqlock);
    857  1.68.2.7      yamt 			break;
    858      1.22   thorpej 
    859      1.22   thorpej 		case PGO_FREE:
    860      1.25   thorpej 			/*
    861      1.25   thorpej 			 * If there are multiple references to
    862      1.25   thorpej 			 * the object, just deactivate the page.
    863      1.25   thorpej 			 */
    864      1.46       chs 
    865      1.25   thorpej 			if (uobj->uo_refs > 1)
    866      1.25   thorpej 				goto deactivate_it;
    867      1.25   thorpej 
    868      1.22   thorpej 			/*
    869  1.68.2.7      yamt 			 * free the swap slot and the page.
    870      1.22   thorpej 			 */
    871      1.46       chs 
    872      1.46       chs 			pmap_page_protect(pg, VM_PROT_NONE);
    873  1.68.2.1      yamt 
    874  1.68.2.1      yamt 			/*
    875  1.68.2.1      yamt 			 * freeing swapslot here is not strictly necessary.
    876  1.68.2.1      yamt 			 * however, leaving it here doesn't save much
    877  1.68.2.1      yamt 			 * because we need to update swap accounting anyway.
    878  1.68.2.1      yamt 			 */
    879  1.68.2.1      yamt 
    880      1.46       chs 			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
    881  1.68.2.7      yamt 			mutex_enter(&uvm_pageqlock);
    882      1.46       chs 			uvm_pagefree(pg);
    883  1.68.2.7      yamt 			mutex_exit(&uvm_pageqlock);
    884  1.68.2.7      yamt 			break;
    885  1.68.2.7      yamt 
    886  1.68.2.7      yamt 		default:
    887  1.68.2.7      yamt 			panic("%s: impossible", __func__);
    888      1.22   thorpej 		}
    889      1.22   thorpej 	}
    890      1.51     enami 	if (by_list) {
    891      1.51     enami 		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
    892  1.68.2.4      yamt 		uvm_lwp_rele(curlwp);
    893  1.68.2.4      yamt 	}
    894  1.68.2.6      yamt 	mutex_exit(&uobj->vmobjlock);
    895      1.46       chs 	return 0;
    896       1.1       mrg }
    897       1.1       mrg 
    898       1.1       mrg /*
    899       1.1       mrg  * uao_get: fetch me a page
    900       1.1       mrg  *
    901       1.1       mrg  * we have three cases:
    902       1.1       mrg  * 1: page is resident     -> just return the page.
    903       1.1       mrg  * 2: page is zero-fill    -> allocate a new page and zero it.
    904       1.1       mrg  * 3: page is swapped out  -> fetch the page from swap.
    905       1.1       mrg  *
    906       1.1       mrg  * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
    907       1.1       mrg  * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
    908      1.40       chs  * then we will need to return EBUSY.
    909       1.1       mrg  *
    910       1.1       mrg  * => prefer map unlocked (not required)
    911       1.1       mrg  * => object must be locked!  we will _unlock_ it before starting any I/O.
    912       1.1       mrg  * => flags: PGO_ALLPAGES: get all of the pages
    913       1.1       mrg  *           PGO_LOCKED: fault data structures are locked
    914       1.1       mrg  * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
    915       1.1       mrg  * => NOTE: caller must check for released pages!!
    916       1.1       mrg  */
    917      1.46       chs 
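#if 0
/*
 * Editorial sketch, not part of the original file: the two-pass protocol
 * described above, as a hypothetical caller going through the pager ops.
 * The first pass holds the object lock and asks only for resident or
 * zero-fill pages (PGO_LOCKED); a swapped-out center page comes back as
 * EBUSY and forces a blocking second pass (PGO_SYNCIO), during which the
 * object is unlocked for I/O.  Any page returned is PG_BUSY and the
 * caller must un-busy it.
 */
static int
uao_get_example(struct uvm_object *uobj, voff_t off, struct vm_page **pgp)
{
	int npages = 1;
	int error;

	*pgp = NULL;
	mutex_enter(&uobj->vmobjlock);
	error = (*uobj->pgops->pgo_get)(uobj, off, pgp, &npages, 0,
	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_LOCKED);
	if (error != EBUSY) {
		/* case 1 or 2: handled (or failed) under the object lock */
		mutex_exit(&uobj->vmobjlock);
		return error;
	}

	/*
	 * case 3: the page needs I/O.  retry without PGO_LOCKED; this may
	 * sleep and returns with the object unlocked.
	 */
	npages = 1;
	*pgp = NULL;
	return (*uobj->pgops->pgo_get)(uobj, off, pgp, &npages, 0,
	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
}
#endif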
    918       1.5       mrg static int
    919      1.67   thorpej uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    920      1.67   thorpej     int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
    921       1.5       mrg {
    922  1.68.2.1      yamt #if defined(VMSWAP)
    923       1.5       mrg 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    924  1.68.2.1      yamt #endif /* defined(VMSWAP) */
    925      1.28    kleink 	voff_t current_offset;
    926      1.52       scw 	struct vm_page *ptmp = NULL;	/* Quell compiler warning */
    927  1.68.2.1      yamt 	int lcv, gotpages, maxpages, swslot, pageidx;
    928  1.68.2.3      yamt 	bool done;
    929       1.5       mrg 	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);
    930       1.5       mrg 
    931      1.27       chs 	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
    932  1.68.2.1      yamt 		    (struct uvm_aobj *)uobj, offset, flags,0);
    933      1.37       chs 
    934       1.5       mrg 	/*
    935       1.5       mrg  	 * get number of pages
    936       1.5       mrg  	 */
    937      1.46       chs 
    938       1.5       mrg 	maxpages = *npagesp;
    939       1.5       mrg 
    940       1.5       mrg 	/*
     941       1.5       mrg  	 * step 1: handle the case where fault data structures are locked.
    942       1.5       mrg  	 */
    943       1.1       mrg 
    944       1.5       mrg 	if (flags & PGO_LOCKED) {
    945      1.46       chs 
    946       1.5       mrg 		/*
    947       1.5       mrg  		 * step 1a: get pages that are already resident.   only do
    948       1.5       mrg 		 * this if the data structures are locked (i.e. the first
    949       1.5       mrg 		 * time through).
    950       1.5       mrg  		 */
    951       1.5       mrg 
    952  1.68.2.3      yamt 		done = true;	/* be optimistic */
    953       1.5       mrg 		gotpages = 0;	/* # of pages we got so far */
    954       1.5       mrg 		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
    955       1.5       mrg 		    lcv++, current_offset += PAGE_SIZE) {
    956       1.5       mrg 			/* do we care about this page?  if not, skip it */
    957       1.5       mrg 			if (pps[lcv] == PGO_DONTCARE)
    958       1.5       mrg 				continue;
    959       1.5       mrg 			ptmp = uvm_pagelookup(uobj, current_offset);
    960       1.5       mrg 
    961       1.5       mrg 			/*
    962      1.30   thorpej  			 * if page is new, attempt to allocate the page,
    963      1.30   thorpej 			 * zero-fill'd.
    964       1.5       mrg  			 */
    965      1.46       chs 
    966      1.46       chs 			if (ptmp == NULL && uao_find_swslot(&aobj->u_obj,
    967      1.15       chs 			    current_offset >> PAGE_SHIFT) == 0) {
    968       1.5       mrg 				ptmp = uvm_pagealloc(uobj, current_offset,
    969      1.30   thorpej 				    NULL, UVM_PGA_ZERO);
    970       1.5       mrg 				if (ptmp) {
    971       1.5       mrg 					/* new page */
    972      1.47       chs 					ptmp->flags &= ~(PG_FAKE);
    973       1.5       mrg 					ptmp->pqflags |= PQ_AOBJ;
    974      1.47       chs 					goto gotpage;
    975       1.5       mrg 				}
    976       1.5       mrg 			}
    977       1.5       mrg 
    978       1.5       mrg 			/*
    979      1.46       chs 			 * to be useful must get a non-busy page
    980       1.5       mrg 			 */
    981      1.46       chs 
    982      1.46       chs 			if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
    983       1.5       mrg 				if (lcv == centeridx ||
    984       1.5       mrg 				    (flags & PGO_ALLPAGES) != 0)
    985       1.5       mrg 					/* need to do a wait or I/O! */
    986  1.68.2.3      yamt 					done = false;
     987       1.5       mrg 				continue;
    988       1.5       mrg 			}
    989       1.5       mrg 
    990       1.5       mrg 			/*
    991       1.5       mrg 			 * useful page: busy/lock it and plug it in our
    992       1.5       mrg 			 * result array
    993       1.5       mrg 			 */
    994      1.46       chs 
    995       1.5       mrg 			/* caller must un-busy this page */
    996      1.41       chs 			ptmp->flags |= PG_BUSY;
    997       1.5       mrg 			UVM_PAGE_OWN(ptmp, "uao_get1");
    998      1.47       chs gotpage:
    999       1.5       mrg 			pps[lcv] = ptmp;
   1000       1.5       mrg 			gotpages++;
   1001      1.46       chs 		}
   1002       1.5       mrg 
   1003       1.5       mrg 		/*
   1004       1.5       mrg  		 * step 1b: now we've either done everything needed or we
    1005       1.5       mrg 		 * need to unlock and do some waiting or I/O.
   1006       1.5       mrg  		 */
   1007       1.5       mrg 
   1008       1.5       mrg 		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);
   1009       1.5       mrg 		*npagesp = gotpages;
   1010       1.5       mrg 		if (done)
   1011      1.46       chs 			return 0;
   1012       1.5       mrg 		else
   1013      1.46       chs 			return EBUSY;
   1014       1.1       mrg 	}
   1015       1.1       mrg 
   1016       1.5       mrg 	/*
   1017       1.5       mrg  	 * step 2: get non-resident or busy pages.
   1018       1.5       mrg  	 * object is locked.   data structures are unlocked.
   1019       1.5       mrg  	 */
   1020       1.5       mrg 
   1021  1.68.2.1      yamt 	if ((flags & PGO_SYNCIO) == 0) {
   1022  1.68.2.1      yamt 		goto done;
   1023  1.68.2.1      yamt 	}
   1024  1.68.2.1      yamt 
   1025       1.5       mrg 	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
   1026       1.5       mrg 	    lcv++, current_offset += PAGE_SIZE) {
   1027      1.27       chs 
   1028       1.5       mrg 		/*
   1029       1.5       mrg 		 * - skip over pages we've already gotten or don't want
   1030       1.5       mrg 		 * - skip over pages we don't _have_ to get
   1031       1.5       mrg 		 */
   1032      1.27       chs 
   1033       1.5       mrg 		if (pps[lcv] != NULL ||
   1034       1.5       mrg 		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
   1035       1.5       mrg 			continue;
   1036       1.5       mrg 
   1037      1.27       chs 		pageidx = current_offset >> PAGE_SHIFT;
   1038      1.27       chs 
   1039       1.5       mrg 		/*
   1040       1.5       mrg  		 * we have yet to locate the current page (pps[lcv]).   we
   1041       1.5       mrg 		 * first look for a page that is already at the current offset.
   1042       1.5       mrg 		 * if we find a page, we check to see if it is busy or
   1043       1.5       mrg 		 * released.  if that is the case, then we sleep on the page
   1044       1.5       mrg 		 * until it is no longer busy or released and repeat the lookup.
   1045       1.5       mrg 		 * if the page we found is neither busy nor released, then we
   1046       1.5       mrg 		 * busy it (so we own it) and plug it into pps[lcv].   this
   1047       1.5       mrg 		 * 'break's the following while loop and indicates we are
   1048       1.5       mrg 		 * ready to move on to the next page in the "lcv" loop above.
   1049       1.5       mrg  		 *
   1050       1.5       mrg  		 * if we exit the while loop with pps[lcv] still set to NULL,
   1051       1.5       mrg 		 * then it means that we allocated a new busy/fake/clean page
   1052       1.5       mrg 		 * ptmp in the object and we need to do I/O to fill in the data.
   1053       1.5       mrg  		 */
   1054       1.5       mrg 
   1055       1.5       mrg 		/* top of "pps" while loop */
   1056       1.5       mrg 		while (pps[lcv] == NULL) {
   1057       1.5       mrg 			/* look for a resident page */
   1058       1.5       mrg 			ptmp = uvm_pagelookup(uobj, current_offset);
   1059       1.5       mrg 
   1060       1.5       mrg 			/* not resident?   allocate one now (if we can) */
   1061       1.5       mrg 			if (ptmp == NULL) {
   1062       1.5       mrg 
   1063       1.5       mrg 				ptmp = uvm_pagealloc(uobj, current_offset,
   1064      1.19       chs 				    NULL, 0);
   1065       1.5       mrg 
   1066       1.5       mrg 				/* out of RAM? */
   1067       1.5       mrg 				if (ptmp == NULL) {
   1068  1.68.2.6      yamt 					mutex_exit(&uobj->vmobjlock);
   1069       1.5       mrg 					UVMHIST_LOG(pdhist,
   1070       1.5       mrg 					    "sleeping, ptmp == NULL\n",0,0,0,0);
   1071       1.5       mrg 					uvm_wait("uao_getpage");
   1072  1.68.2.6      yamt 					mutex_enter(&uobj->vmobjlock);
   1073      1.41       chs 					continue;
   1074       1.5       mrg 				}
   1075       1.5       mrg 
   1076       1.5       mrg 				/*
   1077       1.5       mrg 				 * safe with PQ's unlocked: because we just
   1078       1.5       mrg 				 * alloc'd the page
   1079       1.5       mrg 				 */
   1080      1.46       chs 
   1081       1.5       mrg 				ptmp->pqflags |= PQ_AOBJ;
   1082       1.5       mrg 
   1083      1.41       chs 				/*
   1084       1.5       mrg 				 * got new page ready for I/O.  break pps while
   1085       1.5       mrg 				 * loop.  pps[lcv] is still NULL.
   1086       1.5       mrg 				 */
   1087      1.46       chs 
   1088       1.5       mrg 				break;
   1089       1.5       mrg 			}
   1090       1.5       mrg 
   1091       1.5       mrg 			/* page is there, see if we need to wait on it */
   1092      1.46       chs 			if ((ptmp->flags & PG_BUSY) != 0) {
   1093       1.5       mrg 				ptmp->flags |= PG_WANTED;
   1094       1.5       mrg 				UVMHIST_LOG(pdhist,
   1095       1.5       mrg 				    "sleeping, ptmp->flags 0x%x\n",
   1096       1.5       mrg 				    ptmp->flags,0,0,0);
   1097      1.23   thorpej 				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
   1098  1.68.2.3      yamt 				    false, "uao_get", 0);
   1099  1.68.2.6      yamt 				mutex_enter(&uobj->vmobjlock);
   1100      1.46       chs 				continue;
   1101       1.5       mrg 			}
   1102      1.41       chs 
   1103      1.41       chs 			/*
   1104       1.5       mrg  			 * if we get here then the page has become resident and
   1105       1.5       mrg 			 * unbusy between steps 1 and 2.  we busy it now (so we
   1106       1.5       mrg 			 * own it) and set pps[lcv] (so that we exit the while
   1107       1.5       mrg 			 * loop).
   1108       1.5       mrg  			 */
   1109      1.46       chs 
   1110       1.5       mrg 			/* we own it, caller must un-busy */
   1111       1.5       mrg 			ptmp->flags |= PG_BUSY;
   1112       1.5       mrg 			UVM_PAGE_OWN(ptmp, "uao_get2");
   1113       1.5       mrg 			pps[lcv] = ptmp;
   1114       1.5       mrg 		}
   1115       1.5       mrg 
   1116       1.5       mrg 		/*
   1117       1.5       mrg  		 * if we own the valid page at the correct offset, pps[lcv] will
   1118       1.5       mrg  		 * point to it.   nothing more to do except go to the next page.
   1119       1.5       mrg  		 */
   1120      1.46       chs 
   1121       1.5       mrg 		if (pps[lcv])
   1122       1.5       mrg 			continue;			/* next lcv */
   1123       1.5       mrg 
   1124       1.5       mrg 		/*
   1125      1.41       chs  		 * we have a "fake/busy/clean" page that we just allocated.
   1126       1.5       mrg  		 * do the needed "i/o", either reading from swap or zeroing.
   1127       1.5       mrg  		 */
   1128      1.46       chs 
   1129      1.46       chs 		swslot = uao_find_swslot(&aobj->u_obj, pageidx);
   1130       1.5       mrg 
   1131       1.5       mrg 		/*
   1132       1.5       mrg  		 * just zero the page if there's nothing in swap.
   1133       1.5       mrg  		 */
   1134      1.46       chs 
   1135      1.46       chs 		if (swslot == 0) {
   1136      1.46       chs 
   1137       1.5       mrg 			/*
   1138       1.5       mrg 			 * page hasn't existed before, just zero it.
   1139       1.5       mrg 			 */
   1140      1.46       chs 
   1141       1.5       mrg 			uvm_pagezero(ptmp);
   1142      1.27       chs 		} else {
   1143  1.68.2.1      yamt #if defined(VMSWAP)
   1144  1.68.2.1      yamt 			int error;
   1145  1.68.2.1      yamt 
   1146       1.5       mrg 			UVMHIST_LOG(pdhist, "pagein from swslot %d",
   1147       1.5       mrg 			     swslot, 0,0,0);
   1148       1.5       mrg 
   1149       1.5       mrg 			/*
   1150       1.5       mrg 			 * page in the swapped-out page.
   1151       1.5       mrg 			 * unlock object for i/o, relock when done.
   1152       1.5       mrg 			 */
   1153      1.46       chs 
   1154  1.68.2.6      yamt 			mutex_exit(&uobj->vmobjlock);
   1155      1.46       chs 			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
   1156  1.68.2.6      yamt 			mutex_enter(&uobj->vmobjlock);
   1157       1.5       mrg 
   1158       1.5       mrg 			/*
   1159       1.5       mrg 			 * I/O done.  check for errors.
   1160       1.5       mrg 			 */
   1161      1.46       chs 
   1162      1.46       chs 			if (error != 0) {
   1163       1.5       mrg 				UVMHIST_LOG(pdhist, "<- done (error=%d)",
   1164      1.46       chs 				    error,0,0,0);
   1165       1.5       mrg 				if (ptmp->flags & PG_WANTED)
   1166      1.24   thorpej 					wakeup(ptmp);
   1167      1.27       chs 
   1168      1.27       chs 				/*
   1169      1.27       chs 				 * remove the swap slot from the aobj
   1170      1.27       chs 				 * and mark the aobj as having no real slot.
   1171      1.27       chs 				 * don't free the swap slot, thus preventing
   1172      1.27       chs 				 * it from being used again.
   1173      1.27       chs 				 */
   1174      1.46       chs 
   1175      1.27       chs 				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
   1176      1.27       chs 							SWSLOT_BAD);
   1177      1.57        pk 				if (swslot > 0) {
   1178      1.45       chs 					uvm_swap_markbad(swslot, 1);
   1179      1.45       chs 				}
   1180      1.27       chs 
   1181  1.68.2.6      yamt 				mutex_enter(&uvm_pageqlock);
   1182       1.5       mrg 				uvm_pagefree(ptmp);
   1183  1.68.2.6      yamt 				mutex_exit(&uvm_pageqlock);
   1184  1.68.2.6      yamt 				mutex_exit(&uobj->vmobjlock);
   1185      1.46       chs 				return error;
   1186       1.5       mrg 			}
   1187  1.68.2.1      yamt #else /* defined(VMSWAP) */
   1188  1.68.2.1      yamt 			panic("%s: pagein", __func__);
   1189  1.68.2.1      yamt #endif /* defined(VMSWAP) */
   1190  1.68.2.1      yamt 		}
   1191  1.68.2.1      yamt 
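                             		/*
                             		 * if the caller does not intend to write the page
                             		 * (no VM_PROT_WRITE in access_type), it can be considered
                             		 * clean: mark it so and clear the pmap-level modify bit.
                             		 */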
   1192  1.68.2.1      yamt 		if ((access_type & VM_PROT_WRITE) == 0) {
   1193  1.68.2.1      yamt 			ptmp->flags |= PG_CLEAN;
   1194  1.68.2.1      yamt 			pmap_clear_modify(ptmp);
   1195       1.5       mrg 		}
   1196       1.5       mrg 
   1197      1.41       chs 		/*
   1198       1.5       mrg  		 * we got the page!   clear the fake flag (indicates valid
   1199       1.5       mrg 		 * data now in page) and plug into our result array.   note
   1200      1.41       chs 		 * that page is still busy.
   1201       1.5       mrg  		 *
    1202       1.5       mrg  		 * it is the caller's job to:
   1203       1.5       mrg  		 * => check if the page is released
   1204       1.5       mrg  		 * => unbusy the page
   1205       1.5       mrg  		 * => activate the page
   1206       1.5       mrg  		 */
   1207       1.5       mrg 
   1208      1.46       chs 		ptmp->flags &= ~PG_FAKE;
   1209       1.5       mrg 		pps[lcv] = ptmp;
   1210      1.46       chs 	}
   1211       1.1       mrg 
   1212       1.1       mrg 	/*
   1213       1.5       mrg  	 * finally, unlock object and return.
   1214       1.5       mrg  	 */
   1215       1.1       mrg 
   1216  1.68.2.1      yamt done:
   1217  1.68.2.6      yamt 	mutex_exit(&uobj->vmobjlock);
   1218       1.5       mrg 	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
   1219      1.46       chs 	return 0;
   1220       1.1       mrg }
   1221       1.1       mrg 
   1222  1.68.2.1      yamt #if defined(VMSWAP)
   1223  1.68.2.1      yamt 
   1224       1.1       mrg /*
   1225      1.18       chs  * uao_dropswap:  release any swap resources from this aobj page.
   1226      1.41       chs  *
   1227      1.18       chs  * => aobj must be locked or have a reference count of 0.
   1228      1.18       chs  */
   1229      1.18       chs 
   1230      1.18       chs void
   1231      1.67   thorpej uao_dropswap(struct uvm_object *uobj, int pageidx)
   1232      1.18       chs {
   1233      1.18       chs 	int slot;
   1234      1.18       chs 
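                             	/*
                             	 * clearing the slot returns its previous value; if the page
                             	 * had swap space allocated, free it.
                             	 */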
   1235      1.18       chs 	slot = uao_set_swslot(uobj, pageidx, 0);
   1236      1.18       chs 	if (slot) {
   1237      1.18       chs 		uvm_swap_free(slot, 1);
   1238      1.18       chs 	}
   1239      1.27       chs }
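
                             /*
                              * illustrative sketch only (not a caller in this file): a routine
                              * freeing an aobj page would typically drop its swap resources and
                              * then free the page, roughly:
                              *
                              *	uao_dropswap(uobj, pageidx);
                              *	mutex_enter(&uvm_pageqlock);
                              *	uvm_pagefree(pg);
                              *	mutex_exit(&uvm_pageqlock);
                              *
                              * here "pg" and "pageidx" are hypothetical: the object's resident
                              * page at the given page index, with the object lock held.
                              */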
   1240      1.27       chs 
   1241      1.27       chs /*
   1242      1.27       chs  * page in every page in every aobj that is paged-out to a range of swslots.
   1243      1.41       chs  *
   1244      1.27       chs  * => nothing should be locked.
   1245  1.68.2.3      yamt  * => returns true if pagein was aborted due to lack of memory.
   1246      1.27       chs  */
   1247      1.46       chs 
   1248  1.68.2.3      yamt bool
   1249      1.67   thorpej uao_swap_off(int startslot, int endslot)
   1250      1.27       chs {
   1251      1.27       chs 	struct uvm_aobj *aobj, *nextaobj;
   1252  1.68.2.3      yamt 	bool rv;
   1253      1.27       chs 
   1254      1.27       chs 	/*
   1255      1.27       chs 	 * walk the list of all aobjs.
   1256      1.27       chs 	 */
   1257      1.27       chs 
   1258      1.27       chs restart:
   1259  1.68.2.4      yamt 	mutex_enter(&uao_list_lock);
   1260      1.27       chs 	for (aobj = LIST_FIRST(&uao_list);
   1261      1.27       chs 	     aobj != NULL;
   1262      1.27       chs 	     aobj = nextaobj) {
   1263      1.27       chs 
   1264      1.27       chs 		/*
   1265      1.46       chs 		 * try to get the object lock, start all over if we fail.
   1266      1.27       chs 		 * most of the time we'll get the aobj lock,
   1267      1.27       chs 		 * so this should be a rare case.
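                             		 * (the uao list lock is held at this point, so we only try
                             		 * the object lock and back off rather than blocking.)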
   1268      1.27       chs 		 */
   1269      1.46       chs 
   1270  1.68.2.6      yamt 		if (!mutex_tryenter(&aobj->u_obj.vmobjlock)) {
   1271  1.68.2.4      yamt 			mutex_exit(&uao_list_lock);
   1272  1.68.2.6      yamt 			/* XXX Better than yielding but inadequate. */
   1273  1.68.2.6      yamt 			kpause("livelock", false, 1, NULL);
   1274      1.27       chs 			goto restart;
   1275      1.27       chs 		}
   1276      1.27       chs 
   1277      1.27       chs 		/*
   1278      1.27       chs 		 * add a ref to the aobj so it doesn't disappear
   1279      1.27       chs 		 * while we're working.
   1280      1.27       chs 		 */
   1281      1.46       chs 
   1282      1.27       chs 		uao_reference_locked(&aobj->u_obj);
   1283      1.27       chs 
   1284      1.27       chs 		/*
   1285      1.27       chs 		 * now it's safe to unlock the uao list.
   1286      1.27       chs 		 */
   1287      1.46       chs 
   1288  1.68.2.4      yamt 		mutex_exit(&uao_list_lock);
   1289      1.27       chs 
   1290      1.27       chs 		/*
   1291      1.27       chs 		 * page in any pages in the swslot range.
   1292      1.27       chs 		 * if there's an error, abort and return the error.
   1293      1.27       chs 		 */
   1294      1.46       chs 
   1295      1.27       chs 		rv = uao_pagein(aobj, startslot, endslot);
   1296      1.27       chs 		if (rv) {
   1297      1.27       chs 			uao_detach_locked(&aobj->u_obj);
   1298      1.27       chs 			return rv;
   1299      1.27       chs 		}
   1300      1.27       chs 
   1301      1.27       chs 		/*
   1302      1.27       chs 		 * we're done with this aobj.
   1303      1.27       chs 		 * relock the list and drop our ref on the aobj.
   1304      1.27       chs 		 */
   1305      1.46       chs 
   1306  1.68.2.4      yamt 		mutex_enter(&uao_list_lock);
   1307      1.27       chs 		nextaobj = LIST_NEXT(aobj, u_list);
   1308      1.27       chs 		uao_detach_locked(&aobj->u_obj);
   1309      1.27       chs 	}
   1310      1.27       chs 
   1311      1.27       chs 	/*
   1312      1.27       chs 	 * done with traversal, unlock the list
   1313      1.27       chs 	 */
   1314  1.68.2.4      yamt 	mutex_exit(&uao_list_lock);
   1315  1.68.2.3      yamt 	return false;
   1316      1.27       chs }
   1317      1.27       chs 
   1318      1.27       chs 
   1319      1.27       chs /*
   1320      1.27       chs  * page in any pages from aobj in the given range.
   1321      1.27       chs  *
   1322      1.27       chs  * => aobj must be locked and is returned locked.
   1323  1.68.2.3      yamt  * => returns true if pagein was aborted due to lack of memory.
   1324      1.27       chs  */
   1325  1.68.2.3      yamt static bool
   1326      1.67   thorpej uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
   1327      1.27       chs {
   1328  1.68.2.3      yamt 	bool rv;
   1329      1.27       chs 
   1330      1.27       chs 	if (UAO_USES_SWHASH(aobj)) {
   1331      1.27       chs 		struct uao_swhash_elt *elt;
   1332      1.65  christos 		int buck;
   1333      1.27       chs 
   1334      1.27       chs restart:
   1335      1.65  christos 		for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
   1336      1.65  christos 			for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
   1337      1.27       chs 			     elt != NULL;
   1338      1.27       chs 			     elt = LIST_NEXT(elt, list)) {
   1339      1.27       chs 				int i;
   1340      1.27       chs 
   1341      1.27       chs 				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
   1342      1.27       chs 					int slot = elt->slots[i];
   1343      1.27       chs 
   1344      1.27       chs 					/*
   1345      1.27       chs 					 * if the slot isn't in range, skip it.
   1346      1.27       chs 					 */
   1347      1.46       chs 
   1348      1.41       chs 					if (slot < startslot ||
   1349      1.27       chs 					    slot >= endslot) {
   1350      1.27       chs 						continue;
   1351      1.27       chs 					}
   1352      1.27       chs 
   1353      1.27       chs 					/*
   1354      1.27       chs 					 * process the page,
    1355      1.27       chs 					 * then start over on this object
   1356      1.27       chs 					 * since the swhash elt
   1357      1.27       chs 					 * may have been freed.
   1358      1.27       chs 					 */
   1359      1.46       chs 
   1360      1.27       chs 					rv = uao_pagein_page(aobj,
   1361      1.27       chs 					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
   1362      1.27       chs 					if (rv) {
   1363      1.27       chs 						return rv;
   1364      1.27       chs 					}
   1365      1.27       chs 					goto restart;
   1366      1.27       chs 				}
   1367      1.27       chs 			}
   1368      1.27       chs 		}
   1369      1.27       chs 	} else {
   1370      1.27       chs 		int i;
   1371      1.27       chs 
   1372      1.27       chs 		for (i = 0; i < aobj->u_pages; i++) {
   1373      1.27       chs 			int slot = aobj->u_swslots[i];
   1374      1.27       chs 
   1375      1.27       chs 			/*
   1376      1.27       chs 			 * if the slot isn't in range, skip it
   1377      1.27       chs 			 */
   1378      1.46       chs 
   1379      1.27       chs 			if (slot < startslot || slot >= endslot) {
   1380      1.27       chs 				continue;
   1381      1.27       chs 			}
   1382      1.27       chs 
   1383      1.27       chs 			/*
   1384      1.27       chs 			 * process the page.
   1385      1.27       chs 			 */
   1386      1.46       chs 
   1387      1.27       chs 			rv = uao_pagein_page(aobj, i);
   1388      1.27       chs 			if (rv) {
   1389      1.27       chs 				return rv;
   1390      1.27       chs 			}
   1391      1.27       chs 		}
   1392      1.27       chs 	}
   1393      1.27       chs 
   1394  1.68.2.3      yamt 	return false;
   1395      1.27       chs }
   1396      1.27       chs 
   1397      1.27       chs /*
   1398      1.27       chs  * page in a page from an aobj.  used for swap_off.
   1399  1.68.2.3      yamt  * returns true if pagein was aborted due to lack of memory.
   1400      1.27       chs  *
   1401      1.27       chs  * => aobj must be locked and is returned locked.
   1402      1.27       chs  */
   1403      1.46       chs 
   1404  1.68.2.3      yamt static bool
   1405      1.67   thorpej uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
   1406      1.27       chs {
   1407      1.27       chs 	struct vm_page *pg;
   1408      1.57        pk 	int rv, npages;
   1409      1.27       chs 
   1410      1.27       chs 	pg = NULL;
   1411      1.27       chs 	npages = 1;
   1412      1.27       chs 	/* locked: aobj */
   1413      1.27       chs 	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
   1414  1.68.2.1      yamt 	    &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, PGO_SYNCIO);
   1415      1.27       chs 	/* unlocked: aobj */
   1416      1.27       chs 
   1417      1.27       chs 	/*
   1418      1.27       chs 	 * relock and finish up.
   1419      1.27       chs 	 */
   1420      1.46       chs 
   1421  1.68.2.6      yamt 	mutex_enter(&aobj->u_obj.vmobjlock);
   1422      1.27       chs 	switch (rv) {
   1423      1.40       chs 	case 0:
   1424      1.27       chs 		break;
   1425      1.27       chs 
   1426      1.40       chs 	case EIO:
   1427      1.40       chs 	case ERESTART:
   1428      1.46       chs 
   1429      1.27       chs 		/*
   1430      1.27       chs 		 * nothing more to do on errors.
   1431      1.40       chs 		 * ERESTART can only mean that the anon was freed,
   1432      1.27       chs 		 * so again there's nothing to do.
   1433      1.27       chs 		 */
   1434      1.46       chs 
   1435  1.68.2.3      yamt 		return false;
   1436      1.59        pk 
   1437      1.59        pk 	default:
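
                             		/*
                             		 * any other error means the pagein could not complete;
                             		 * report it as an abort.
                             		 */
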
   1438  1.68.2.3      yamt 		return true;
   1439      1.27       chs 	}
   1440      1.27       chs 
   1441      1.27       chs 	/*
   1442      1.27       chs 	 * ok, we've got the page now.
   1443      1.27       chs 	 * mark it as dirty, clear its swslot and un-busy it.
   1444      1.27       chs 	 */
   1445      1.57        pk 	uao_dropswap(&aobj->u_obj, pageidx);
   1446      1.27       chs 
   1447      1.27       chs 	/*
   1448  1.68.2.2      yamt 	 * make sure it's on a page queue.
   1449      1.27       chs 	 */
   1450  1.68.2.6      yamt 	mutex_enter(&uvm_pageqlock);
   1451      1.58        pk 	if (pg->wire_count == 0)
   1452  1.68.2.2      yamt 		uvm_pageenqueue(pg);
   1453  1.68.2.6      yamt 	mutex_exit(&uvm_pageqlock);
   1454      1.56      yamt 
   1455      1.59        pk 	if (pg->flags & PG_WANTED) {
   1456      1.59        pk 		wakeup(pg);
   1457      1.59        pk 	}
   1458      1.59        pk 	pg->flags &= ~(PG_WANTED|PG_BUSY|PG_CLEAN|PG_FAKE);
   1459      1.56      yamt 	UVM_PAGE_OWN(pg, NULL);
   1460      1.56      yamt 
   1461  1.68.2.3      yamt 	return false;
   1462       1.1       mrg }
   1463  1.68.2.1      yamt 
   1464  1.68.2.1      yamt /*
   1465  1.68.2.1      yamt  * uao_dropswap_range: drop swapslots in the range.
   1466  1.68.2.1      yamt  *
   1467  1.68.2.1      yamt  * => aobj must be locked and is returned locked.
   1468  1.68.2.1      yamt  * => start is inclusive.  end is exclusive.
   1469  1.68.2.1      yamt  */
   1470  1.68.2.1      yamt 
   1471  1.68.2.1      yamt void
   1472  1.68.2.1      yamt uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
   1473  1.68.2.1      yamt {
   1474  1.68.2.1      yamt 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
   1475  1.68.2.1      yamt 
   1476  1.68.2.6      yamt 	KASSERT(mutex_owned(&uobj->vmobjlock));
   1477  1.68.2.1      yamt 
   1478  1.68.2.1      yamt 	uao_dropswap_range1(aobj, start, end);
   1479  1.68.2.1      yamt }
   1480  1.68.2.1      yamt 
   1481  1.68.2.1      yamt static void
   1482  1.68.2.1      yamt uao_dropswap_range1(struct uvm_aobj *aobj, voff_t start, voff_t end)
   1483  1.68.2.1      yamt {
   1484  1.68.2.1      yamt 	int swpgonlydelta = 0;
   1485  1.68.2.1      yamt 
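                             	/*
                             	 * an end of 0 means "to the end of the object".
                             	 */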
   1486  1.68.2.1      yamt 	if (end == 0) {
   1487  1.68.2.1      yamt 		end = INT64_MAX;
   1488  1.68.2.1      yamt 	}
   1489  1.68.2.1      yamt 
   1490  1.68.2.1      yamt 	if (UAO_USES_SWHASH(aobj)) {
   1491  1.68.2.1      yamt 		int i, hashbuckets = aobj->u_swhashmask + 1;
   1492  1.68.2.1      yamt 		voff_t taghi;
   1493  1.68.2.1      yamt 		voff_t taglo;
   1494  1.68.2.1      yamt 
   1495  1.68.2.1      yamt 		taglo = UAO_SWHASH_ELT_TAG(start);
   1496  1.68.2.1      yamt 		taghi = UAO_SWHASH_ELT_TAG(end);
   1497  1.68.2.1      yamt 
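                             		/*
                             		 * only hash elements whose tag falls within [taglo, taghi]
                             		 * can hold slots in the range; the two boundary elements are
                             		 * trimmed to the partial cluster via startidx/endidx below.
                             		 */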
   1498  1.68.2.1      yamt 		for (i = 0; i < hashbuckets; i++) {
   1499  1.68.2.1      yamt 			struct uao_swhash_elt *elt, *next;
   1500  1.68.2.1      yamt 
   1501  1.68.2.1      yamt 			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
   1502  1.68.2.1      yamt 			     elt != NULL;
   1503  1.68.2.1      yamt 			     elt = next) {
   1504  1.68.2.1      yamt 				int startidx, endidx;
   1505  1.68.2.1      yamt 				int j;
   1506  1.68.2.1      yamt 
   1507  1.68.2.1      yamt 				next = LIST_NEXT(elt, list);
   1508  1.68.2.1      yamt 
   1509  1.68.2.1      yamt 				if (elt->tag < taglo || taghi < elt->tag) {
   1510  1.68.2.1      yamt 					continue;
   1511  1.68.2.1      yamt 				}
   1512  1.68.2.1      yamt 
   1513  1.68.2.1      yamt 				if (elt->tag == taglo) {
   1514  1.68.2.1      yamt 					startidx =
   1515  1.68.2.1      yamt 					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
   1516  1.68.2.1      yamt 				} else {
   1517  1.68.2.1      yamt 					startidx = 0;
   1518  1.68.2.1      yamt 				}
   1519  1.68.2.1      yamt 
   1520  1.68.2.1      yamt 				if (elt->tag == taghi) {
   1521  1.68.2.1      yamt 					endidx =
   1522  1.68.2.1      yamt 					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
   1523  1.68.2.1      yamt 				} else {
   1524  1.68.2.1      yamt 					endidx = UAO_SWHASH_CLUSTER_SIZE;
   1525  1.68.2.1      yamt 				}
   1526  1.68.2.1      yamt 
   1527  1.68.2.1      yamt 				for (j = startidx; j < endidx; j++) {
   1528  1.68.2.1      yamt 					int slot = elt->slots[j];
   1529  1.68.2.1      yamt 
   1530  1.68.2.1      yamt 					KASSERT(uvm_pagelookup(&aobj->u_obj,
   1531  1.68.2.1      yamt 					    (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
   1532  1.68.2.1      yamt 					    + j) << PAGE_SHIFT) == NULL);
   1533  1.68.2.1      yamt 					if (slot > 0) {
   1534  1.68.2.1      yamt 						uvm_swap_free(slot, 1);
   1535  1.68.2.1      yamt 						swpgonlydelta++;
   1536  1.68.2.1      yamt 						KASSERT(elt->count > 0);
   1537  1.68.2.1      yamt 						elt->slots[j] = 0;
   1538  1.68.2.1      yamt 						elt->count--;
   1539  1.68.2.1      yamt 					}
   1540  1.68.2.1      yamt 				}
   1541  1.68.2.1      yamt 
   1542  1.68.2.1      yamt 				if (elt->count == 0) {
   1543  1.68.2.1      yamt 					LIST_REMOVE(elt, list);
   1544  1.68.2.1      yamt 					pool_put(&uao_swhash_elt_pool, elt);
   1545  1.68.2.1      yamt 				}
   1546  1.68.2.1      yamt 			}
   1547  1.68.2.1      yamt 		}
   1548  1.68.2.1      yamt 	} else {
   1549  1.68.2.1      yamt 		int i;
   1550  1.68.2.1      yamt 
   1551  1.68.2.1      yamt 		if (aobj->u_pages < end) {
   1552  1.68.2.1      yamt 			end = aobj->u_pages;
   1553  1.68.2.1      yamt 		}
   1554  1.68.2.1      yamt 		for (i = start; i < end; i++) {
   1555  1.68.2.1      yamt 			int slot = aobj->u_swslots[i];
   1556  1.68.2.1      yamt 
   1557  1.68.2.1      yamt 			if (slot > 0) {
   1558  1.68.2.1      yamt 				uvm_swap_free(slot, 1);
   1559  1.68.2.1      yamt 				swpgonlydelta++;
   1560  1.68.2.1      yamt 			}
   1561  1.68.2.1      yamt 		}
   1562  1.68.2.1      yamt 	}
   1563  1.68.2.1      yamt 
   1564  1.68.2.1      yamt 	/*
   1565  1.68.2.1      yamt 	 * adjust the counter of pages only in swap for all
   1566  1.68.2.1      yamt 	 * the swap slots we've freed.
   1567  1.68.2.1      yamt 	 */
   1568  1.68.2.1      yamt 
   1569  1.68.2.1      yamt 	if (swpgonlydelta > 0) {
   1570  1.68.2.4      yamt 		mutex_enter(&uvm_swap_data_lock);
   1571  1.68.2.1      yamt 		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
   1572  1.68.2.1      yamt 		uvmexp.swpgonly -= swpgonlydelta;
   1573  1.68.2.4      yamt 		mutex_exit(&uvm_swap_data_lock);
   1574  1.68.2.1      yamt 	}
   1575  1.68.2.1      yamt }
   1576  1.68.2.1      yamt 
   1577  1.68.2.1      yamt #endif /* defined(VMSWAP) */
   1578