/*	$NetBSD: uvm_aobj.c,v 1.108.4.5 2011/05/31 03:05:14 rmind Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
 *                    Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_aobj.c,v 1.1.2.5 1998/02/06 05:14:38 chs Exp
 */

/*
 * uvm_aobj.c: anonymous memory uvm_object pager
 *
 * author: Chuck Silvers <chuq@chuq.com>
 * started: Jan-1998
 *
 * - design mostly from Chuck Cranor
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.108.4.5 2011/05/31 03:05:14 rmind Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * an aobj manages anonymous-memory backed uvm_objects.   in addition
 * to keeping the list of resident pages, it also keeps a list of
 * allocated swap blocks.  depending on the size of the aobj this list
 * of allocated swap blocks is either stored in an array (small objects)
 * or in a hash table (large objects).
 */

/*
 * local structures
 */

/*
 * for hash tables, we break the address space of the aobj into blocks
 * of UAO_SWHASH_CLUSTER_SIZE pages.   we require the cluster size to
 * be a power of two.
 */

#define UAO_SWHASH_CLUSTER_SHIFT 4
#define UAO_SWHASH_CLUSTER_SIZE (1 << UAO_SWHASH_CLUSTER_SHIFT)

/* get the "tag" for this page index */
#define UAO_SWHASH_ELT_TAG(PAGEIDX) \
	((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT)

#define UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX) \
	((PAGEIDX) & (UAO_SWHASH_CLUSTER_SIZE - 1))

/* given an ELT and a page index, find the swap slot */
#define UAO_SWHASH_ELT_PAGESLOT(ELT, PAGEIDX) \
	((ELT)->slots[UAO_SWHASH_ELT_PAGESLOT_IDX(PAGEIDX)])

/* given an ELT, return its pageidx base */
#define UAO_SWHASH_ELT_PAGEIDX_BASE(ELT) \
	((ELT)->tag << UAO_SWHASH_CLUSTER_SHIFT)
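
/*
 * Example (illustrative): with UAO_SWHASH_CLUSTER_SHIFT == 4 (cluster
 * size 16), page index 0x35 has tag 0x3 (0x35 >> 4), in-cluster slot
 * index 0x5 (0x35 & 0xf), and a cluster base page index of 0x30
 * (0x3 << 4).
 */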

/*
 * the swhash hash function
 */

#define UAO_SWHASH_HASH(AOBJ, PAGEIDX) \
	(&(AOBJ)->u_swhash[(((PAGEIDX) >> UAO_SWHASH_CLUSTER_SHIFT) \
			    & (AOBJ)->u_swhashmask)])

/*
 * the swhash threshold determines if we will use an array or a
 * hash table to store the list of allocated swap blocks.
 */

#define UAO_SWHASH_THRESHOLD (UAO_SWHASH_CLUSTER_SIZE * 4)
#define UAO_USES_SWHASH(AOBJ) \
	((AOBJ)->u_pages > UAO_SWHASH_THRESHOLD)	/* use hash? */

/*
 * the number of buckets in a swhash, with an upper bound
 */

#define UAO_SWHASH_MAXBUCKETS 256
#define UAO_SWHASH_BUCKETS(AOBJ) \
	(MIN((AOBJ)->u_pages >> UAO_SWHASH_CLUSTER_SHIFT, \
	     UAO_SWHASH_MAXBUCKETS))
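
/*
 * Example (illustrative): with the defaults above, the threshold is
 * 16 * 4 = 64 pages, so only aobjs of more than 64 pages (256KB with
 * 4KB pages) use the hash.  A 1024-page aobj gets MIN(1024 >> 4, 256)
 * = 64 buckets; aobjs of 4096 pages or more are capped at 256 buckets.
 */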

/*
 * uao_swhash_elt: when a hash table is being used, this structure defines
 * the format of an entry in the bucket list.
 */

struct uao_swhash_elt {
	LIST_ENTRY(uao_swhash_elt) list;	/* the hash list */
	voff_t tag;				/* our 'tag' */
	int count;				/* our number of active slots */
	int slots[UAO_SWHASH_CLUSTER_SIZE];	/* the slots */
};

/*
 * uao_swhash: the swap hash table structure
 */

LIST_HEAD(uao_swhash, uao_swhash_elt);

/*
 * uao_swhash_elt_pool: pool of uao_swhash_elt structures.
 * Note: pages for this pool must not come from a pageable kernel map.
 */
static struct pool uao_swhash_elt_pool;

/*
 * uvm_aobj: the actual anon-backed uvm_object
 *
 * => the uvm_object is at the top of the structure, this allows
 *   (struct uvm_aobj *) == (struct uvm_object *)
 * => only one of u_swslots and u_swhash is used in any given aobj
 */

struct uvm_aobj {
	struct uvm_object u_obj; /* has: lock, pgops, memq, #pages, #refs */
	pgoff_t u_pages;	 /* number of pages in entire object */
	int u_flags;		 /* the flags (see uvm_aobj.h) */
	int *u_swslots;		 /* array of offset->swapslot mappings */
				 /*
				  * hashtable of offset->swapslot mappings
				  * (u_swhash is an array of bucket heads)
				  */
	struct uao_swhash *u_swhash;
	u_long u_swhashmask;		/* mask for hashtable */
	LIST_ENTRY(uvm_aobj) u_list;	/* global list of aobjs */
};

/*
 * local functions
 */

static void	uao_free(struct uvm_aobj *);
static int	uao_get(struct uvm_object *, voff_t, struct vm_page **,
		    int *, int, vm_prot_t, int, int);
static int	uao_put(struct uvm_object *, voff_t, voff_t, int);

static void uao_detach_locked(struct uvm_object *);
static void uao_reference_locked(struct uvm_object *);

#if defined(VMSWAP)
static struct uao_swhash_elt *uao_find_swhash_elt
    (struct uvm_aobj *, int, bool);

static bool uao_pagein(struct uvm_aobj *, int, int);
static bool uao_pagein_page(struct uvm_aobj *, int);
static void uao_dropswap_range1(struct uvm_aobj *, voff_t, voff_t);
#endif /* defined(VMSWAP) */

/*
 * aobj_pager
 *
 * note that some functions (e.g. put) are handled elsewhere
 */

const struct uvm_pagerops aobj_pager = {
	.pgo_reference = uao_reference,
	.pgo_detach = uao_detach,
	.pgo_get = uao_get,
	.pgo_put = uao_put,
};

/*
 * uao_list: global list of active aobjs, locked by uao_list_lock
 */

static LIST_HEAD(aobjlist, uvm_aobj) uao_list;
static kmutex_t uao_list_lock;

/*
 * functions
 */

/*
 * hash table/array related functions
 */

#if defined(VMSWAP)

/*
 * uao_find_swhash_elt: find (or create) a hash table entry for a page
 * offset.
 *
 * => the object should be locked by the caller
 */

static struct uao_swhash_elt *
uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, bool create)
{
	struct uao_swhash *swhash;
	struct uao_swhash_elt *elt;
	voff_t page_tag;

	swhash = UAO_SWHASH_HASH(aobj, pageidx);
	page_tag = UAO_SWHASH_ELT_TAG(pageidx);

	/*
	 * now search the bucket for the requested tag
	 */

	LIST_FOREACH(elt, swhash, list) {
		if (elt->tag == page_tag) {
			return elt;
		}
	}
	if (!create) {
		return NULL;
	}

	/*
	 * allocate a new entry for the bucket and init/insert it in
	 */

	elt = pool_get(&uao_swhash_elt_pool, PR_NOWAIT);
	if (elt == NULL) {
		return NULL;
	}
	LIST_INSERT_HEAD(swhash, elt, list);
	elt->tag = page_tag;
	elt->count = 0;
	memset(elt->slots, 0, sizeof(elt->slots));
	return elt;
}
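
/*
 * Illustrative caller pattern (a sketch, not code from a real caller):
 * lookups pass create == false and treat a NULL return as "no swap
 * slots recorded for this cluster".  Because creation uses PR_NOWAIT,
 * callers passing create == true must also be prepared for NULL:
 *
 *	elt = uao_find_swhash_elt(aobj, pageidx, true);
 *	if (elt == NULL)
 *		recover from the failed pool allocation;
 */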

/*
 * uao_find_swslot: find the swap slot number for an aobj/pageidx
 *
 * => object must be locked by caller
 */

int
uao_find_swslot(struct uvm_object *uobj, int pageidx)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;

	/*
	 * if noswap flag is set, then we never return a slot
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP)
		return(0);

	/*
	 * if hashing, look in hash table.
	 */

	if (UAO_USES_SWHASH(aobj)) {
		elt = uao_find_swhash_elt(aobj, pageidx, false);
		if (elt)
			return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
		else
			return(0);
	}

	/*
	 * otherwise, look in the array
	 */

	return(aobj->u_swslots[pageidx]);
}

/*
 * uao_set_swslot: set the swap slot for a page in an aobj.
 *
 * => setting a slot to zero frees the slot
 * => object must be locked by caller
 * => we return the old slot number, or -1 if we failed to allocate
 *    memory to record the new slot number
 */

int
uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct uao_swhash_elt *elt;
	int oldslot;
	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
	UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
	    aobj, pageidx, slot, 0);

	KASSERT(mutex_owned(uobj->vmobjlock) || uobj->uo_refs == 0);

	/*
	 * if noswap flag is set, then we can't set a non-zero slot.
	 */

	if (aobj->u_flags & UAO_FLAG_NOSWAP) {
		if (slot == 0)
			return(0);

		printf("uao_set_swslot: uobj = %p\n", uobj);
		panic("uao_set_swslot: NOSWAP object");
	}

	/*
	 * are we using a hash table?  if so, add it in the hash.
	 */

	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * Avoid allocating an entry just to free it again if
		 * the page had no swap slot in the first place, and
		 * we are freeing.
		 */

		elt = uao_find_swhash_elt(aobj, pageidx, slot != 0);
		if (elt == NULL) {
			return slot ? -1 : 0;
		}

		oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
		UAO_SWHASH_ELT_PAGESLOT(elt, pageidx) = slot;

		/*
		 * now adjust the elt's reference counter and free it if we've
		 * dropped it to zero.
		 */

		if (slot) {
			if (oldslot == 0)
				elt->count++;
		} else {
			if (oldslot)
				elt->count--;

			if (elt->count == 0) {
				LIST_REMOVE(elt, list);
				pool_put(&uao_swhash_elt_pool, elt);
			}
		}
	} else {
		/* we are using an array */
		oldslot = aobj->u_swslots[pageidx];
		aobj->u_swslots[pageidx] = slot;
	}
	return (oldslot);
}
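
/*
 * Illustrative use of uao_set_swslot() (a sketch; "pageidx" and "slot"
 * are placeholders): the caller holds the object lock, records the new
 * slot, and must treat a -1 return as "no memory to record the
 * mapping", backing out (e.g. freeing the swap slot) as needed:
 *
 *	mutex_enter(uobj->vmobjlock);
 *	oldslot = uao_set_swslot(uobj, pageidx, slot);
 *	mutex_exit(uobj->vmobjlock);
 *	if (oldslot == -1)
 *		back out and retry or fail;
 */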

#endif /* defined(VMSWAP) */

/*
 * end of hash/array functions
 */

/*
 * uao_free: free all resources held by an aobj, and then free the aobj
 *
 * => the aobj should be dead
 */

static void
uao_free(struct uvm_aobj *aobj)
{

#if defined(VMSWAP)
	uao_dropswap_range1(aobj, 0, 0);
#endif /* defined(VMSWAP) */

	mutex_exit(aobj->u_obj.vmobjlock);

#if defined(VMSWAP)
	if (UAO_USES_SWHASH(aobj)) {

		/*
		 * free the hash table itself.
		 */

		hashdone(aobj->u_swhash, HASH_LIST, aobj->u_swhashmask);
	} else {

		/*
		 * free the array itself.
		 */

		kmem_free(aobj->u_swslots, aobj->u_pages * sizeof(int));
	}
#endif /* defined(VMSWAP) */

	/*
	 * finally free the aobj itself
	 */

	uvm_obj_destroy(&aobj->u_obj, true);
	kmem_free(aobj, sizeof(struct uvm_aobj));
}

/*
 * pager functions
 */

/*
 * uao_create: create an aobj of the given size and return its uvm_object.
 *
 * => for normal use, flags are always zero
 * => for the kernel object, the flags are:
 *	UAO_FLAG_KERNOBJ - allocate the kernel object (can only happen once)
 *	UAO_FLAG_KERNSWAP - enable swapping of kernel object ("           ")
 */

struct uvm_object *
uao_create(vsize_t size, int flags)
{
	static struct uvm_aobj kernel_object_store;
	static kmutex_t kernel_object_lock;
	static int kobj_alloced = 0;
	pgoff_t pages = round_page(size) >> PAGE_SHIFT;
	struct uvm_aobj *aobj;
	int refs;

	/*
	 * Allocate a new aobj, unless kernel object is requested.
	 */

	if (flags & UAO_FLAG_KERNOBJ) {
		KASSERT(!kobj_alloced);
		aobj = &kernel_object_store;
		aobj->u_pages = pages;
		aobj->u_flags = UAO_FLAG_NOSWAP;
		refs = UVM_OBJ_KERN;
		kobj_alloced = UAO_FLAG_KERNOBJ;
	} else if (flags & UAO_FLAG_KERNSWAP) {
		KASSERT(kobj_alloced == UAO_FLAG_KERNOBJ);
		aobj = &kernel_object_store;
		kobj_alloced = UAO_FLAG_KERNSWAP;
		refs = 0xdeadbeaf; /* XXX: gcc */
	} else {
		aobj = kmem_alloc(sizeof(struct uvm_aobj), KM_SLEEP);
		aobj->u_pages = pages;
		aobj->u_flags = 0;
		refs = 1;
	}

	/*
	 * allocate hash/array if necessary
	 *
	 * note: in the KERNSWAP case there is no need to worry about locking
	 * since we are still booting; we should be the only thread around.
	 */

	if (flags == 0 || (flags & UAO_FLAG_KERNSWAP) != 0) {
#if defined(VMSWAP)
		const int kernswap = (flags & UAO_FLAG_KERNSWAP) != 0;

		/* allocate hash table or array depending on object size */
		if (UAO_USES_SWHASH(aobj)) {
			aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
			    HASH_LIST, kernswap ? false : true,
			    &aobj->u_swhashmask);
			if (aobj->u_swhash == NULL)
				panic("uao_create: hashinit swhash failed");
		} else {
			aobj->u_swslots = kmem_zalloc(pages * sizeof(int),
			    kernswap ? KM_NOSLEEP : KM_SLEEP);
			if (aobj->u_swslots == NULL)
				panic("uao_create: swslots allocation failed");
		}
#endif /* defined(VMSWAP) */

		if (flags) {
			aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
			return(&aobj->u_obj);
		}
	}

	/*
	 * Initialise UVM object.
	 */

	const bool kernobj = (flags & UAO_FLAG_KERNOBJ) != 0;
	uvm_obj_init(&aobj->u_obj, &aobj_pager, !kernobj, refs);
	if (__predict_false(kernobj)) {
		/* Initialisation only once, for UAO_FLAG_KERNOBJ. */
		mutex_init(&kernel_object_lock, MUTEX_DEFAULT, IPL_NONE);
		uvm_obj_setlock(&aobj->u_obj, &kernel_object_lock);
	}

	/*
	 * now that aobj is ready, add it to the global list
	 */

	mutex_enter(&uao_list_lock);
	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
	mutex_exit(&uao_list_lock);
	return(&aobj->u_obj);
}
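
/*
 * Illustrative use of uao_create() (a sketch, error handling elided):
 * ordinary callers pass flags == 0 and later drop their reference with
 * uao_detach() when the object is no longer needed:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uao_create(size, 0);
 *	... use the object ...
 *	uao_detach(uobj);
 */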

/*
 * uao_init: set up aobj pager subsystem
 *
 * => called at boot time from uvm_pager_init()
 */

void
uao_init(void)
{
	static int uao_initialized;

	if (uao_initialized)
		return;
	uao_initialized = true;
	LIST_INIT(&uao_list);
	mutex_init(&uao_list_lock, MUTEX_DEFAULT, IPL_NONE);
	pool_init(&uao_swhash_elt_pool, sizeof(struct uao_swhash_elt),
	    0, 0, 0, "uaoeltpl", NULL, IPL_VM);
}

/*
 * uao_reference: add a ref to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_reference(struct uvm_object *uobj)
{

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	mutex_enter(uobj->vmobjlock);
	uao_reference_locked(uobj);
	mutex_exit(uobj->vmobjlock);
}

/*
 * uao_reference_locked: add a ref to an aobj that is already locked
 *
 * => aobj must be locked
 * this needs to be separate from the normal routine
 * since sometimes we need to add a reference to an aobj when
 * it's already locked.
 */

static void
uao_reference_locked(struct uvm_object *uobj)
{
	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

	/*
	 * kernel_object already has plenty of references, leave it alone.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
		    uobj, uobj->uo_refs,0,0);
}

/*
 * uao_detach: drop a reference to an aobj
 *
 * => aobj must be unlocked
 * => just lock it and call the locked version
 */

void
uao_detach(struct uvm_object *uobj)
{

	/*
	 * detaching from kernel_object is a noop.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		return;

	mutex_enter(uobj->vmobjlock);
	uao_detach_locked(uobj);
}

/*
 * uao_detach_locked: drop a reference to an aobj
 *
 * => aobj must be locked, and is unlocked (or freed) upon return.
 * this needs to be separate from the normal routine
 * since sometimes we need to detach from an aobj when
 * it's already locked.
 */

static void
uao_detach_locked(struct uvm_object *uobj)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg;
	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);

	/*
	 * detaching from kernel_object is a noop.
	 */

	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		mutex_exit(uobj->vmobjlock);
		return;
	}

	UVMHIST_LOG(maphist,"  (uobj=0x%x)  ref=%d", uobj,uobj->uo_refs,0,0);
	uobj->uo_refs--;
	if (uobj->uo_refs) {
		mutex_exit(uobj->vmobjlock);
		UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
		return;
	}

	/*
	 * remove the aobj from the global list.
	 */

	mutex_enter(&uao_list_lock);
	LIST_REMOVE(aobj, u_list);
	mutex_exit(&uao_list_lock);

	/*
	 * free all the pages left in the aobj.  for each page,
	 * when the page is no longer busy (and thus after any disk i/o that
	 * it's involved in is complete), release any swap resources and
	 * free the page itself.
	 */

	mutex_enter(&uvm_pageqlock);
	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
		pmap_page_protect(pg, VM_PROT_NONE);
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			mutex_exit(&uvm_pageqlock);
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, false,
			    "uao_det", 0);
			mutex_enter(uobj->vmobjlock);
			mutex_enter(&uvm_pageqlock);
			continue;
		}
		uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
		uvm_pagefree(pg);
	}
	mutex_exit(&uvm_pageqlock);

	/*
	 * finally, free the aobj itself.
	 */

	uao_free(aobj);
}

/*
 * uao_put: flush pages out of a uvm object
 *
 * => object should be locked by caller.  we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT).
 *	XXXJRT Currently, however, we don't.  In the case of cleaning
 *	XXXJRT a page, we simply just deactivate it.  Should probably
 *	XXXJRT handle this better, in the future (although "flushing"
 *	XXXJRT anonymous memory isn't terribly important).
 * => if PGO_CLEANIT is not set, then we will neither unlock the object
 *	nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object are valid targets
 *	for flushing.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.  thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the lock on them [e.g. pagedaemon had
 *	better not call us with the queues locked]
 * => we return 0 unless we encountered some sort of I/O error
 *	XXXJRT currently never happens, as we never directly initiate
 *	XXXJRT I/O
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing tree lookups for each address.  depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.  we set "by_list" to true if we are using memq.
 *	if the cost of a tree lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.  however, it
 *	seems that a tree lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	start->stop range by a penalty (UVM_PAGE_TREE_PENALTY).
 */

static int
uao_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
	struct vm_page *pg, *nextpg, curmp, endmp;
	bool by_list;
	voff_t curoff;
	UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);

	KASSERT(mutex_owned(uobj->vmobjlock));

	curoff = 0;
	if (flags & PGO_ALLPAGES) {
		start = 0;
		stop = aobj->u_pages << PAGE_SHIFT;
		by_list = true;		/* always go by the list */
	} else {
		start = trunc_page(start);
		if (stop == 0) {
			stop = aobj->u_pages << PAGE_SHIFT;
		} else {
			stop = round_page(stop);
		}
		if (stop > (aobj->u_pages << PAGE_SHIFT)) {
			printf("uao_put: strange, got an out of range "
			    "flush (fixed)\n");
			stop = aobj->u_pages << PAGE_SHIFT;
		}
		by_list = (uobj->uo_npages <=
		    ((stop - start) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);
	}
	UVMHIST_LOG(maphist,
	    " flush start=0x%lx, stop=0x%x, by_list=%d, flags=0x%x",
	    start, stop, by_list, flags);

	/*
	 * Don't need to do any work here if we're not freeing
	 * or deactivating pages.
	 */

	if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
		mutex_exit(uobj->vmobjlock);
		return 0;
	}

	/*
	 * Initialize the marker pages.  See the comment in
	 * genfs_putpages() also.
	 */

	curmp.flags = PG_MARKER;
	endmp.flags = PG_MARKER;

	/*
	 * now do it.  note: we must update nextpg in the body of the loop or
	 * we will get stuck.  we need to use nextpg if we'll traverse the
	 * list because we may free "pg" before doing the next loop.
	 */

	if (by_list) {
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
		nextpg = TAILQ_FIRST(&uobj->memq);
	} else {
		curoff = start;
		nextpg = NULL;	/* Quell compiler warning */
	}

	/* locked: uobj */
	for (;;) {
		if (by_list) {
			pg = nextpg;
			if (pg == &endmp)
				break;
			nextpg = TAILQ_NEXT(pg, listq.queue);
			if (pg->flags & PG_MARKER)
				continue;
			if (pg->offset < start || pg->offset >= stop)
				continue;
		} else {
			if (curoff < stop) {
				pg = uvm_pagelookup(uobj, curoff);
				curoff += PAGE_SIZE;
			} else
				break;
			if (pg == NULL)
				continue;
		}

		/*
		 * wait and try again if the page is busy.
		 */

		if (pg->flags & PG_BUSY) {
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
			}
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
			    "uao_put", 0);
			mutex_enter(uobj->vmobjlock);
			if (by_list) {
				nextpg = TAILQ_NEXT(&curmp, listq.queue);
				TAILQ_REMOVE(&uobj->memq, &curmp,
				    listq.queue);
			} else
				curoff -= PAGE_SIZE;
			continue;
		}

		switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {

		/*
		 * XXX In these first 3 cases, we always just
		 * XXX deactivate the page.  We may want to
		 * XXX handle the different cases more specifically
		 * XXX in the future.
		 */

		case PGO_CLEANIT|PGO_FREE:
		case PGO_CLEANIT|PGO_DEACTIVATE:
		case PGO_DEACTIVATE:
 deactivate_it:
			mutex_enter(&uvm_pageqlock);
			/* skip the page if it's wired */
			if (pg->wire_count == 0) {
				uvm_pagedeactivate(pg);
			}
			mutex_exit(&uvm_pageqlock);
			break;

		case PGO_FREE:
			/*
			 * If there are multiple references to
			 * the object, just deactivate the page.
			 */

			if (uobj->uo_refs > 1)
				goto deactivate_it;

			/*
			 * free the swap slot and the page.
			 */

			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * freeing swapslot here is not strictly necessary.
			 * however, leaving it here doesn't save much
			 * because we need to update swap accounting anyway.
			 */

			uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
			break;

		default:
			panic("%s: impossible", __func__);
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
	}
	mutex_exit(uobj->vmobjlock);
	return 0;
}
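
/*
 * Illustrative use of uao_put() (a sketch): callers normally go
 * through the pager operation vector; to discard every resident page
 * of an aobj, lock the object and request a full free.  Note that the
 * object lock is released before return:
 *
 *	mutex_enter(uobj->vmobjlock);
 *	(void)(*uobj->pgops->pgo_put)(uobj, 0, 0, PGO_ALLPAGES | PGO_FREE);
 */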

/*
 * uao_get: fetch me a page
 *
 * we have three cases:
 * 1: page is resident     -> just return the page.
 * 2: page is zero-fill    -> allocate a new page and zero it.
 * 3: page is swapped out  -> fetch the page from swap.
 *
 * cases 1 and 2 can be handled with PGO_LOCKED, case 3 cannot.
 * so, if the "center" page hits case 3 (or any page, with PGO_ALLPAGES),
 * then we will need to return EBUSY.
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
    int *npagesp, int centeridx, vm_prot_t access_type, int advice, int flags)
{
#if defined(VMSWAP)
	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
#endif /* defined(VMSWAP) */
	voff_t current_offset;
	struct vm_page *ptmp = NULL;	/* Quell compiler warning */
	int lcv, gotpages, maxpages, swslot, pageidx;
	bool done;
	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
		    (struct uvm_aobj *)uobj, offset, flags,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where the fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first
		 * time through).
		 */

		done = true;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */
		for (lcv = 0, current_offset = offset ; lcv < maxpages ;
		    lcv++, current_offset += PAGE_SIZE) {
			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;
			ptmp = uvm_pagelookup(uobj, current_offset);

			/*
			 * if page is new, attempt to allocate the page,
			 * zero-fill'd.
			 */

			if (ptmp == NULL && uao_find_swslot(&aobj->u_obj,
			    current_offset >> PAGE_SHIFT) == 0) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, UVM_PGA_ZERO);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_FAKE);
					ptmp->pqflags |= PQ_AOBJ;
					goto gotpage;
				}
			}

			/*
			 * to be useful must get a non-busy page
			 */

			if (ptmp == NULL || (ptmp->flags & PG_BUSY) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = false;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get1");
gotpage:
			pps[lcv] = ptmp;
			gotpages++;
		}

		/*
		 * step 1b: now we've either done everything needed or we need
		 * to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(pdhist, "<- done (done=%d)", done, 0,0,0);
		*npagesp = gotpages;
		if (done)
			return 0;
		else
			return EBUSY;
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	if ((flags & PGO_SYNCIO) == 0) {
		goto done;
	}

	for (lcv = 0, current_offset = offset ; lcv < maxpages ;
	    lcv++, current_offset += PAGE_SIZE) {

		/*
		 * - skip over pages we've already gotten or don't want
		 * - skip over pages we don't _have_ to get
		 */

		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		pageidx = current_offset >> PAGE_SHIFT;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the lookup.
		 * if the page we found is neither busy nor released, then we
		 * busy it (so we own it) and plug it into pps[lcv].   this
		 * 'break's the following while loop and indicates we are
		 * ready to move on to the next page in the "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the data.
		 */

		/* top of "pps" while loop */
		while (pps[lcv] == NULL) {
			/* look for a resident page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* not resident?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					mutex_exit(uobj->vmobjlock);
					UVMHIST_LOG(pdhist,
					    "sleeping, ptmp == NULL\n",0,0,0,0);
					uvm_wait("uao_getpage");
					mutex_enter(uobj->vmobjlock);
					continue;
				}

				/*
				 * safe with PQ's unlocked: because we just
				 * alloc'd the page
				 */

				ptmp->pqflags |= PQ_AOBJ;

				/*
				 * got new page ready for I/O.  break pps while
				 * loop.  pps[lcv] is still NULL.
				 */

				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & PG_BUSY) != 0) {
				ptmp->flags |= PG_WANTED;
				UVMHIST_LOG(pdhist,
				    "sleeping, ptmp->flags 0x%x\n",
				    ptmp->flags,0,0,0);
				UVM_UNLOCK_AND_WAIT(ptmp, uobj->vmobjlock,
				    false, "uao_get", 0);
				mutex_enter(uobj->vmobjlock);
				continue;
			}

			/*
			 * if we get here then the page has become resident and
			 * unbusy between steps 1 and 2.  we busy it now (so we
			 * own it) and set pps[lcv] (so that we exit the while
			 * loop).
			 */

			/* we own it, caller must un-busy */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uao_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own the valid page at the correct offset, pps[lcv] will
   1114        1.5       mrg  		 * point to it.   nothing more to do except go to the next page.
   1115        1.5       mrg  		 */
   1116       1.46       chs 
   1117        1.5       mrg 		if (pps[lcv])
   1118        1.5       mrg 			continue;			/* next lcv */
   1119        1.5       mrg 
   1120        1.5       mrg 		/*
   1121       1.41       chs  		 * we have a "fake/busy/clean" page that we just allocated.
   1122        1.5       mrg  		 * do the needed "i/o", either reading from swap or zeroing.
   1123        1.5       mrg  		 */
   1124       1.46       chs 
   1125       1.46       chs 		swslot = uao_find_swslot(&aobj->u_obj, pageidx);
   1126        1.5       mrg 
   1127        1.5       mrg 		/*
   1128        1.5       mrg  		 * just zero the page if there's nothing in swap.
   1129        1.5       mrg  		 */
   1130       1.46       chs 
   1131       1.46       chs 		if (swslot == 0) {
   1132       1.46       chs 
   1133        1.5       mrg 			/*
   1134        1.5       mrg 			 * page hasn't existed before, just zero it.
   1135        1.5       mrg 			 */
   1136       1.46       chs 
   1137        1.5       mrg 			uvm_pagezero(ptmp);
   1138       1.27       chs 		} else {
   1139       1.72      yamt #if defined(VMSWAP)
   1140       1.72      yamt 			int error;
   1141       1.72      yamt 
   1142        1.5       mrg 			UVMHIST_LOG(pdhist, "pagein from swslot %d",
   1143        1.5       mrg 			     swslot, 0,0,0);
   1144        1.5       mrg 
   1145        1.5       mrg 			/*
   1146        1.5       mrg 			 * page in the swapped-out page.
   1147        1.5       mrg 			 * unlock object for i/o, relock when done.
   1148        1.5       mrg 			 */
   1149       1.46       chs 
   1150  1.108.4.1     rmind 			mutex_exit(uobj->vmobjlock);
   1151       1.46       chs 			error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
   1152  1.108.4.1     rmind 			mutex_enter(uobj->vmobjlock);
   1153        1.5       mrg 
   1154        1.5       mrg 			/*
   1155        1.5       mrg 			 * I/O done.  check for errors.
   1156        1.5       mrg 			 */
   1157       1.46       chs 
   1158       1.46       chs 			if (error != 0) {
   1159        1.5       mrg 				UVMHIST_LOG(pdhist, "<- done (error=%d)",
   1160       1.46       chs 				    error,0,0,0);
   1161        1.5       mrg 				if (ptmp->flags & PG_WANTED)
   1162       1.24   thorpej 					wakeup(ptmp);
   1163       1.27       chs 
   1164       1.27       chs 				/*
   1165       1.27       chs 				 * remove the swap slot from the aobj
   1166       1.27       chs 				 * and mark the aobj as having no real slot.
   1167       1.27       chs 				 * don't free the swap slot, thus preventing
   1168       1.27       chs 				 * it from being used again.
   1169       1.27       chs 				 */
   1170       1.46       chs 
   1171       1.27       chs 				swslot = uao_set_swslot(&aobj->u_obj, pageidx,
   1172       1.27       chs 							SWSLOT_BAD);
   1173       1.57        pk 				if (swslot > 0) {
   1174       1.45       chs 					uvm_swap_markbad(swslot, 1);
   1175       1.45       chs 				}
   1176       1.27       chs 
   1177       1.96        ad 				mutex_enter(&uvm_pageqlock);
   1178        1.5       mrg 				uvm_pagefree(ptmp);
   1179       1.96        ad 				mutex_exit(&uvm_pageqlock);
   1180  1.108.4.1     rmind 				mutex_exit(uobj->vmobjlock);
   1181       1.46       chs 				return error;
   1182        1.5       mrg 			}
   1183       1.72      yamt #else /* defined(VMSWAP) */
   1184       1.72      yamt 			panic("%s: pagein", __func__);
   1185       1.72      yamt #endif /* defined(VMSWAP) */
   1186        1.5       mrg 		}
   1187        1.5       mrg 
   1188       1.78      yamt 		if ((access_type & VM_PROT_WRITE) == 0) {
   1189       1.78      yamt 			ptmp->flags |= PG_CLEAN;
   1190       1.78      yamt 			pmap_clear_modify(ptmp);
   1191       1.78      yamt 		}
   1192       1.78      yamt 
   1193       1.41       chs 		/*
   1194        1.5       mrg  		 * we got the page!   clear the fake flag (indicates valid
   1195        1.5       mrg 		 * data now in page) and plug into our result array.   note
   1196       1.41       chs 		 * that page is still busy.
   1197        1.5       mrg  		 *
    1198        1.5       mrg  		 * it is the caller's job to:
   1199        1.5       mrg  		 * => check if the page is released
   1200        1.5       mrg  		 * => unbusy the page
   1201        1.5       mrg  		 * => activate the page
   1202        1.5       mrg  		 */
   1203        1.5       mrg 
   1204       1.46       chs 		ptmp->flags &= ~PG_FAKE;
   1205        1.5       mrg 		pps[lcv] = ptmp;
   1206       1.46       chs 	}
   1207        1.1       mrg 
   1208        1.1       mrg 	/*
   1209        1.5       mrg  	 * finally, unlock object and return.
   1210        1.5       mrg  	 */
   1211        1.1       mrg 
   1212       1.76      yamt done:
   1213  1.108.4.1     rmind 	mutex_exit(uobj->vmobjlock);
   1214        1.5       mrg 	UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
   1215       1.46       chs 	return 0;
   1216        1.1       mrg }
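
                               /*
                                * usage sketch (illustrative only, not part of the pager itself):
                                * a caller already holding the object lock can probe for resident
                                * pages cheaply with PGO_LOCKED and fall back to a blocking pagein.
                                * "uobj" and "off" are hypothetical; the flag semantics follow the
                                * code above (PGO_LOCKED keeps the object locked and may return
                                * EBUSY; PGO_SYNCIO performs the swap I/O; without PGO_SYNCIO the
                                * I/O is skipped).
                                *
                                *	struct vm_page *pg = NULL;
                                *	int npages = 1;
                                *	int error;
                                *
                                *	error = uao_get(uobj, off, &pg, &npages, 0,
                                *	    VM_PROT_READ, 0, PGO_LOCKED);
                                *	if (error == EBUSY) {
                                *		npages = 1;
                                *		error = uao_get(uobj, off, &pg, &npages, 0,
                                *		    VM_PROT_READ, 0, PGO_SYNCIO);
                                *	}
                                *
                                * the second call returns with the object unlocked and, on
                                * success, pg busied: the caller must then un-busy (and usually
                                * activate) it, as uao_pagein_page() below does.
                                */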
   1217        1.1       mrg 
   1218       1.72      yamt #if defined(VMSWAP)
   1219       1.72      yamt 
   1220        1.1       mrg /*
   1221       1.18       chs  * uao_dropswap:  release any swap resources from this aobj page.
   1222       1.41       chs  *
   1223       1.18       chs  * => aobj must be locked or have a reference count of 0.
   1224       1.18       chs  */
   1225       1.18       chs 
   1226       1.18       chs void
   1227       1.67   thorpej uao_dropswap(struct uvm_object *uobj, int pageidx)
   1228       1.18       chs {
   1229       1.18       chs 	int slot;
   1230       1.18       chs 
   1231       1.18       chs 	slot = uao_set_swslot(uobj, pageidx, 0);
   1232       1.18       chs 	if (slot) {
   1233       1.18       chs 		uvm_swap_free(slot, 1);
   1234       1.18       chs 	}
   1235       1.27       chs }
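
                               /*
                                * usage sketch (illustrative; "pg" is a hypothetical resident
                                * page): a routine freeing an aobj page drops the swap slot first
                                * so the slot is not leaked, then frees the page under the page
                                * queue lock, matching the pattern used in uao_get() above:
                                *
                                *	uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
                                *	mutex_enter(&uvm_pageqlock);
                                *	uvm_pagefree(pg);
                                *	mutex_exit(&uvm_pageqlock);
                                */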
   1236       1.27       chs 
   1237       1.27       chs /*
   1238       1.27       chs  * page in every page in every aobj that is paged-out to a range of swslots.
   1239       1.41       chs  *
   1240       1.27       chs  * => nothing should be locked.
   1241       1.87   thorpej  * => returns true if pagein was aborted due to lack of memory.
   1242       1.27       chs  */
   1243       1.46       chs 
   1244       1.85   thorpej bool
   1245       1.67   thorpej uao_swap_off(int startslot, int endslot)
   1246       1.27       chs {
   1247       1.27       chs 	struct uvm_aobj *aobj, *nextaobj;
   1248       1.85   thorpej 	bool rv;
   1249       1.27       chs 
   1250       1.27       chs 	/*
   1251       1.27       chs 	 * walk the list of all aobjs.
   1252       1.27       chs 	 */
   1253       1.27       chs 
   1254       1.27       chs restart:
   1255       1.90        ad 	mutex_enter(&uao_list_lock);
   1256       1.27       chs 	for (aobj = LIST_FIRST(&uao_list);
   1257       1.27       chs 	     aobj != NULL;
   1258       1.27       chs 	     aobj = nextaobj) {
   1259       1.27       chs 
   1260       1.27       chs 		/*
   1261       1.46       chs 		 * try to get the object lock, start all over if we fail.
   1262       1.27       chs 		 * most of the time we'll get the aobj lock,
   1263       1.27       chs 		 * so this should be a rare case.
   1264       1.27       chs 		 */
   1265       1.46       chs 
   1266  1.108.4.1     rmind 		if (!mutex_tryenter(aobj->u_obj.vmobjlock)) {
   1267       1.90        ad 			mutex_exit(&uao_list_lock);
   1268       1.96        ad 			/* XXX Better than yielding but inadequate. */
   1269       1.96        ad 			kpause("livelock", false, 1, NULL);
   1270       1.27       chs 			goto restart;
   1271       1.27       chs 		}
   1272       1.27       chs 
   1273       1.27       chs 		/*
   1274       1.27       chs 		 * add a ref to the aobj so it doesn't disappear
   1275       1.27       chs 		 * while we're working.
   1276       1.27       chs 		 */
   1277       1.46       chs 
   1278       1.27       chs 		uao_reference_locked(&aobj->u_obj);
   1279       1.27       chs 
   1280       1.27       chs 		/*
   1281       1.27       chs 		 * now it's safe to unlock the uao list.
   1282       1.27       chs 		 */
   1283       1.46       chs 
   1284       1.90        ad 		mutex_exit(&uao_list_lock);
   1285       1.27       chs 
   1286       1.27       chs 		/*
   1287       1.27       chs 		 * page in any pages in the swslot range.
   1288       1.27       chs 		 * if there's an error, abort and return the error.
   1289       1.27       chs 		 */
   1290       1.46       chs 
   1291       1.27       chs 		rv = uao_pagein(aobj, startslot, endslot);
   1292       1.27       chs 		if (rv) {
   1293       1.27       chs 			uao_detach_locked(&aobj->u_obj);
   1294       1.27       chs 			return rv;
   1295       1.27       chs 		}
   1296       1.27       chs 
   1297       1.27       chs 		/*
   1298       1.27       chs 		 * we're done with this aobj.
   1299       1.27       chs 		 * relock the list and drop our ref on the aobj.
   1300       1.27       chs 		 */
   1301       1.46       chs 
   1302       1.90        ad 		mutex_enter(&uao_list_lock);
   1303       1.27       chs 		nextaobj = LIST_NEXT(aobj, u_list);
   1304       1.27       chs 		uao_detach_locked(&aobj->u_obj);
   1305       1.27       chs 	}
   1306       1.27       chs 
   1307       1.27       chs 	/*
   1308       1.27       chs 	 * done with traversal, unlock the list
   1309       1.27       chs 	 */
   1310       1.90        ad 	mutex_exit(&uao_list_lock);
   1311       1.87   thorpej 	return false;
   1312       1.27       chs }
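
                               /*
                                * usage sketch (an assumption about the caller, which lives in
                                * the swap code, not here; the names are hypothetical): before a
                                * swap device covering slots [startslot, endslot) is removed, the
                                * paged-out aobj pages on it must be brought back in:
                                *
                                *	if (uao_swap_off(startslot, endslot)) {
                                *		error = ENOMEM;		(pagein ran out of memory)
                                *	}
                                */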
   1313       1.27       chs 
   1314       1.27       chs 
   1315       1.27       chs /*
   1316       1.27       chs  * page in any pages from aobj in the given range.
   1317       1.27       chs  *
   1318       1.27       chs  * => aobj must be locked and is returned locked.
   1319       1.87   thorpej  * => returns true if pagein was aborted due to lack of memory.
   1320       1.27       chs  */
   1321       1.85   thorpej static bool
   1322       1.67   thorpej uao_pagein(struct uvm_aobj *aobj, int startslot, int endslot)
   1323       1.27       chs {
   1324       1.85   thorpej 	bool rv;
   1325       1.27       chs 
   1326       1.27       chs 	if (UAO_USES_SWHASH(aobj)) {
   1327       1.27       chs 		struct uao_swhash_elt *elt;
   1328       1.65  christos 		int buck;
   1329       1.27       chs 
   1330       1.27       chs restart:
   1331       1.65  christos 		for (buck = aobj->u_swhashmask; buck >= 0; buck--) {
   1332       1.65  christos 			for (elt = LIST_FIRST(&aobj->u_swhash[buck]);
   1333       1.27       chs 			     elt != NULL;
   1334       1.27       chs 			     elt = LIST_NEXT(elt, list)) {
   1335       1.27       chs 				int i;
   1336       1.27       chs 
   1337       1.27       chs 				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
   1338       1.27       chs 					int slot = elt->slots[i];
   1339       1.27       chs 
   1340       1.27       chs 					/*
   1341       1.27       chs 					 * if the slot isn't in range, skip it.
   1342       1.27       chs 					 */
   1343       1.46       chs 
   1344       1.41       chs 					if (slot < startslot ||
   1345       1.27       chs 					    slot >= endslot) {
   1346       1.27       chs 						continue;
   1347       1.27       chs 					}
   1348       1.27       chs 
   1349       1.27       chs 					/*
   1350       1.27       chs 					 * process the page,
    1351       1.27       chs 					 * then start over on this object
   1352       1.27       chs 					 * since the swhash elt
   1353       1.27       chs 					 * may have been freed.
   1354       1.27       chs 					 */
   1355       1.46       chs 
   1356       1.27       chs 					rv = uao_pagein_page(aobj,
   1357       1.27       chs 					  UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
   1358       1.27       chs 					if (rv) {
   1359       1.27       chs 						return rv;
   1360       1.27       chs 					}
   1361       1.27       chs 					goto restart;
   1362       1.27       chs 				}
   1363       1.27       chs 			}
   1364       1.27       chs 		}
   1365       1.27       chs 	} else {
   1366       1.27       chs 		int i;
   1367       1.27       chs 
   1368       1.27       chs 		for (i = 0; i < aobj->u_pages; i++) {
   1369       1.27       chs 			int slot = aobj->u_swslots[i];
   1370       1.27       chs 
   1371       1.27       chs 			/*
   1372       1.27       chs 			 * if the slot isn't in range, skip it
   1373       1.27       chs 			 */
   1374       1.46       chs 
   1375       1.27       chs 			if (slot < startslot || slot >= endslot) {
   1376       1.27       chs 				continue;
   1377       1.27       chs 			}
   1378       1.27       chs 
   1379       1.27       chs 			/*
   1380       1.27       chs 			 * process the page.
   1381       1.27       chs 			 */
   1382       1.46       chs 
   1383       1.27       chs 			rv = uao_pagein_page(aobj, i);
   1384       1.27       chs 			if (rv) {
   1385       1.27       chs 				return rv;
   1386       1.27       chs 			}
   1387       1.27       chs 		}
   1388       1.27       chs 	}
   1389       1.27       chs 
   1390       1.87   thorpej 	return false;
   1391       1.27       chs }
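
                               /*
                                * note on the "goto restart" above: uao_pagein_page() drops the
                                * object lock for the duration of the swap I/O, and a successful
                                * pagein ends in uao_dropswap(), which can empty and free the
                                * very swhash elt being walked.  rescanning the hash from the top
                                * after each pagein is the simple, safe response.
                                */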
   1392       1.27       chs 
   1393       1.27       chs /*
   1394       1.27       chs  * page in a page from an aobj.  used for swap_off.
   1395       1.87   thorpej  * returns true if pagein was aborted due to lack of memory.
   1396       1.27       chs  *
   1397       1.27       chs  * => aobj must be locked and is returned locked.
   1398       1.27       chs  */
   1399       1.46       chs 
   1400       1.85   thorpej static bool
   1401       1.67   thorpej uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
   1402       1.27       chs {
   1403       1.27       chs 	struct vm_page *pg;
   1404       1.57        pk 	int rv, npages;
   1405       1.27       chs 
   1406       1.27       chs 	pg = NULL;
   1407       1.27       chs 	npages = 1;
   1408       1.27       chs 	/* locked: aobj */
   1409       1.27       chs 	rv = uao_get(&aobj->u_obj, pageidx << PAGE_SHIFT,
   1410       1.77      yamt 	    &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, PGO_SYNCIO);
   1411       1.27       chs 	/* unlocked: aobj */
   1412       1.27       chs 
   1413       1.27       chs 	/*
   1414       1.27       chs 	 * relock and finish up.
   1415       1.27       chs 	 */
   1416       1.46       chs 
   1417  1.108.4.1     rmind 	mutex_enter(aobj->u_obj.vmobjlock);
   1418       1.27       chs 	switch (rv) {
   1419       1.40       chs 	case 0:
   1420       1.27       chs 		break;
   1421       1.27       chs 
   1422       1.40       chs 	case EIO:
   1423       1.40       chs 	case ERESTART:
   1424       1.46       chs 
   1425       1.27       chs 		/*
   1426       1.27       chs 		 * nothing more to do on errors.
   1427       1.40       chs 		 * ERESTART can only mean that the anon was freed,
   1428       1.27       chs 		 * so again there's nothing to do.
   1429       1.27       chs 		 */
   1430       1.46       chs 
   1431       1.87   thorpej 		return false;
   1432       1.59        pk 
   1433       1.59        pk 	default:
   1434       1.87   thorpej 		return true;
   1435       1.27       chs 	}
   1436       1.27       chs 
   1437       1.27       chs 	/*
   1438       1.27       chs 	 * ok, we've got the page now.
   1439       1.27       chs 	 * mark it as dirty, clear its swslot and un-busy it.
   1440       1.27       chs 	 */
   1441       1.57        pk 	uao_dropswap(&aobj->u_obj, pageidx);
   1442       1.27       chs 
   1443       1.27       chs 	/*
   1444       1.80      yamt 	 * make sure it's on a page queue.
   1445       1.27       chs 	 */
   1446       1.96        ad 	mutex_enter(&uvm_pageqlock);
   1447       1.58        pk 	if (pg->wire_count == 0)
   1448       1.80      yamt 		uvm_pageenqueue(pg);
   1449       1.96        ad 	mutex_exit(&uvm_pageqlock);
   1450       1.56      yamt 
   1451       1.59        pk 	if (pg->flags & PG_WANTED) {
   1452       1.59        pk 		wakeup(pg);
   1453       1.59        pk 	}
   1454       1.59        pk 	pg->flags &= ~(PG_WANTED|PG_BUSY|PG_CLEAN|PG_FAKE);
   1455       1.56      yamt 	UVM_PAGE_OWN(pg, NULL);
   1456       1.56      yamt 
   1457       1.87   thorpej 	return false;
   1458        1.1       mrg }
   1459       1.72      yamt 
   1460       1.75      yamt /*
   1461       1.75      yamt  * uao_dropswap_range: drop swapslots in the range.
   1462       1.75      yamt  *
   1463       1.75      yamt  * => aobj must be locked and is returned locked.
   1464       1.75      yamt  * => start is inclusive.  end is exclusive.
   1465       1.75      yamt  */
   1466       1.75      yamt 
   1467       1.75      yamt void
   1468       1.75      yamt uao_dropswap_range(struct uvm_object *uobj, voff_t start, voff_t end)
   1469       1.75      yamt {
   1470       1.75      yamt 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
   1471       1.75      yamt 
   1472  1.108.4.1     rmind 	KASSERT(mutex_owned(uobj->vmobjlock));
   1473       1.75      yamt 
   1474       1.75      yamt 	uao_dropswap_range1(aobj, start, end);
   1475       1.75      yamt }
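
                               /*
                                * usage sketch (illustrative; "newsize" is hypothetical): a
                                * caller shrinking an aobj would release the swap backing beyond
                                * the new end while holding the object lock, with end == 0
                                * meaning "to the end of the object".  note that start and end
                                * are page indexes:
                                *
                                *	mutex_enter(uobj->vmobjlock);
                                *	uao_dropswap_range(uobj, newsize >> PAGE_SHIFT, 0);
                                *	mutex_exit(uobj->vmobjlock);
                                */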
   1476       1.75      yamt 
   1477       1.75      yamt static void
   1478       1.75      yamt uao_dropswap_range1(struct uvm_aobj *aobj, voff_t start, voff_t end)
   1479       1.75      yamt {
   1480       1.75      yamt 	int swpgonlydelta = 0;
   1481       1.75      yamt 
   1482       1.75      yamt 	if (end == 0) {
   1483       1.75      yamt 		end = INT64_MAX;
   1484       1.75      yamt 	}
   1485       1.75      yamt 
   1486       1.75      yamt 	if (UAO_USES_SWHASH(aobj)) {
   1487       1.75      yamt 		int i, hashbuckets = aobj->u_swhashmask + 1;
   1488       1.75      yamt 		voff_t taghi;
   1489       1.75      yamt 		voff_t taglo;
   1490       1.75      yamt 
   1491       1.75      yamt 		taglo = UAO_SWHASH_ELT_TAG(start);
   1492       1.75      yamt 		taghi = UAO_SWHASH_ELT_TAG(end);
   1493       1.75      yamt 
   1494       1.75      yamt 		for (i = 0; i < hashbuckets; i++) {
   1495       1.75      yamt 			struct uao_swhash_elt *elt, *next;
   1496       1.75      yamt 
   1497       1.75      yamt 			for (elt = LIST_FIRST(&aobj->u_swhash[i]);
   1498       1.75      yamt 			     elt != NULL;
   1499       1.75      yamt 			     elt = next) {
   1500       1.75      yamt 				int startidx, endidx;
   1501       1.75      yamt 				int j;
   1502       1.75      yamt 
   1503       1.75      yamt 				next = LIST_NEXT(elt, list);
   1504       1.75      yamt 
   1505       1.75      yamt 				if (elt->tag < taglo || taghi < elt->tag) {
   1506       1.75      yamt 					continue;
   1507       1.75      yamt 				}
   1508       1.75      yamt 
   1509       1.75      yamt 				if (elt->tag == taglo) {
   1510       1.75      yamt 					startidx =
   1511       1.75      yamt 					    UAO_SWHASH_ELT_PAGESLOT_IDX(start);
   1512       1.75      yamt 				} else {
   1513       1.75      yamt 					startidx = 0;
   1514       1.75      yamt 				}
   1515       1.75      yamt 
   1516       1.75      yamt 				if (elt->tag == taghi) {
   1517       1.75      yamt 					endidx =
   1518       1.75      yamt 					    UAO_SWHASH_ELT_PAGESLOT_IDX(end);
   1519       1.75      yamt 				} else {
   1520       1.75      yamt 					endidx = UAO_SWHASH_CLUSTER_SIZE;
   1521       1.75      yamt 				}
   1522       1.75      yamt 
   1523       1.75      yamt 				for (j = startidx; j < endidx; j++) {
   1524       1.75      yamt 					int slot = elt->slots[j];
   1525       1.75      yamt 
   1526       1.75      yamt 					KASSERT(uvm_pagelookup(&aobj->u_obj,
   1527       1.75      yamt 					    (UAO_SWHASH_ELT_PAGEIDX_BASE(elt)
   1528       1.75      yamt 					    + j) << PAGE_SHIFT) == NULL);
   1529       1.75      yamt 					if (slot > 0) {
   1530       1.75      yamt 						uvm_swap_free(slot, 1);
   1531       1.75      yamt 						swpgonlydelta++;
   1532       1.75      yamt 						KASSERT(elt->count > 0);
   1533       1.75      yamt 						elt->slots[j] = 0;
   1534       1.75      yamt 						elt->count--;
   1535       1.75      yamt 					}
   1536       1.75      yamt 				}
   1537       1.75      yamt 
   1538       1.75      yamt 				if (elt->count == 0) {
   1539       1.75      yamt 					LIST_REMOVE(elt, list);
   1540       1.75      yamt 					pool_put(&uao_swhash_elt_pool, elt);
   1541       1.75      yamt 				}
   1542       1.75      yamt 			}
   1543       1.75      yamt 		}
   1544       1.75      yamt 	} else {
   1545       1.75      yamt 		int i;
   1546       1.75      yamt 
   1547       1.75      yamt 		if (aobj->u_pages < end) {
   1548       1.75      yamt 			end = aobj->u_pages;
   1549       1.75      yamt 		}
   1550       1.75      yamt 		for (i = start; i < end; i++) {
   1551       1.75      yamt 			int slot = aobj->u_swslots[i];
   1552       1.75      yamt 
   1553       1.75      yamt 			if (slot > 0) {
   1554       1.75      yamt 				uvm_swap_free(slot, 1);
   1555       1.75      yamt 				swpgonlydelta++;
   1556       1.75      yamt 			}
   1557       1.75      yamt 		}
   1558       1.75      yamt 	}
   1559       1.75      yamt 
   1560       1.75      yamt 	/*
   1561       1.75      yamt 	 * adjust the counter of pages only in swap for all
   1562       1.75      yamt 	 * the swap slots we've freed.
   1563       1.75      yamt 	 */
   1564       1.75      yamt 
   1565       1.75      yamt 	if (swpgonlydelta > 0) {
   1566       1.92        ad 		mutex_enter(&uvm_swap_data_lock);
   1567       1.75      yamt 		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
   1568       1.75      yamt 		uvmexp.swpgonly -= swpgonlydelta;
   1569       1.92        ad 		mutex_exit(&uvm_swap_data_lock);
   1570       1.75      yamt 	}
   1571       1.75      yamt }
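
                               /*
                                * worked example of the tag arithmetic above, assuming a swhash
                                * cluster size of 16, i.e. a cluster shift of 4 (the actual
                                * constants are defined with the swhash macros earlier in this
                                * file): dropping pages [5, 35) gives taglo = 0 and taghi = 2.
                                * an elt tagged 0 is trimmed from startidx 5 up to 16, an elt
                                * tagged 1 is cleared completely (0 up to 16), and an elt tagged
                                * 2 is trimmed from 0 up to endidx 3 (35 mod 16), honoring the
                                * exclusive "end".
                                */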
   1572       1.75      yamt 
   1573       1.72      yamt #endif /* defined(VMSWAP) */
   1574