/*	$NetBSD: uvm_page.c,v 1.136 2008/06/17 02:30:57 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.136 2008/06/17 02:30:57 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/atomic.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */
/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either CPU caches or DMA latency.
 */
bool vm_page_zero_enable = false;

/*
 * local variables
 */

/*
 * these variables record the virtual address space boundaries obtained
 * from pmap_virtual_space(), for debugging purposes; uvm_page_init()
 * passes them up to the upper layers of the VM.  The implementation of
 * uvm_pageboot_alloc here also uses them internally.
 */

static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * we allocate an initial number of page colors in uvm_page_init(),
 * and remember them.  We may re-color pages as cache sizes are
 * discovered during the autoconfiguration phase.  But we can never
 * free the initial set of buckets, since they are allocated using
 * uvm_pageboot_alloc().
 */

static bool have_recolored_pages /* = false */;

MALLOC_DEFINE(M_VMPAGE, "VM page", "VM page");

#ifdef DEBUG
vaddr_t uvm_zerocheckkva;
#endif /* DEBUG */

/*
 * local prototypes
 */

static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * per-object tree of pages
 */

static signed int
uvm_page_compare_nodes(const struct rb_node *n1, const struct rb_node *n2)
{
	const struct vm_page *pg1 = (const void *)n1;
	const struct vm_page *pg2 = (const void *)n2;
	const voff_t a = pg1->offset;
	const voff_t b = pg2->offset;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

static signed int
uvm_page_compare_key(const struct rb_node *n, const void *key)
{
	const struct vm_page *pg = (const void *)n;
	const voff_t a = pg->offset;
	const voff_t b = *(const voff_t *)key;

	if (a < b)
		return 1;
	if (a > b)
		return -1;
	return 0;
}

const struct rb_tree_ops uvm_page_tree_ops = {
	.rb_compare_nodes = uvm_page_compare_nodes,
	.rb_compare_key = uvm_page_compare_key,
};
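
/*
 * Example (illustrative sketch, not part of the original file): with
 * the comparators above, a page can be looked up by offset in an
 * object's tree.  The hypothetical helper below assumes the
 * rb_tree_find_node() interface from <sys/rb.h>, which passes the key
 * by address exactly as uvm_page_compare_key() expects, and relies on
 * rb_node being the first member of struct vm_page (as the casts in
 * the comparators do).
 */
#if 0	/* illustrative only */
static struct vm_page *
uvm_page_lookup_tree(struct uvm_object *uobj, voff_t off)
{

	KASSERT(mutex_owned(&uobj->vmobjlock));
	return (struct vm_page *)rb_tree_find_node(&uobj->rb_tree, &off);
}
#endif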

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object.
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

static inline void
uvm_pageinsert_list(struct uvm_object *uobj, struct vm_page *pg,
    struct vm_page *where)
{

	KASSERT(uobj == pg->uobject);
	KASSERT(mutex_owned(&uobj->vmobjlock));
	KASSERT((pg->flags & PG_TABLED) == 0);
	KASSERT(where == NULL || (where->flags & PG_TABLED));
	KASSERT(where == NULL || (where->uobject == uobj));

	if (UVM_OBJ_IS_VNODE(uobj)) {
		if (uobj->uo_npages == 0) {
			struct vnode *vp = (struct vnode *)uobj;

			vholdl(vp);
		}
		if (UVM_OBJ_IS_VTEXT(uobj)) {
			atomic_inc_uint(&uvmexp.execpages);
		} else {
			atomic_inc_uint(&uvmexp.filepages);
		}
	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
		atomic_inc_uint(&uvmexp.anonpages);
	}

	if (where)
		TAILQ_INSERT_AFTER(&uobj->memq, where, pg, listq.queue);
	else
		TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
	pg->flags |= PG_TABLED;
	uobj->uo_npages++;
}


static inline void
uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
{
	bool success;

	KASSERT(uobj == pg->uobject);
	success = rb_tree_insert_node(&uobj->rb_tree, &pg->rb_node);
	KASSERT(success);
}

static inline void
uvm_pageinsert(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	uvm_pageinsert_tree(uobj, pg);
	uvm_pageinsert_list(uobj, pg, NULL);
}

/*
 * uvm_pageremove: remove page from object.
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static inline void
uvm_pageremove_list(struct uvm_object *uobj, struct vm_page *pg)
{

	KASSERT(uobj == pg->uobject);
	KASSERT(mutex_owned(&uobj->vmobjlock));
	KASSERT(pg->flags & PG_TABLED);

	if (UVM_OBJ_IS_VNODE(uobj)) {
		if (uobj->uo_npages == 1) {
			struct vnode *vp = (struct vnode *)uobj;

			holdrelel(vp);
		}
		if (UVM_OBJ_IS_VTEXT(uobj)) {
			atomic_dec_uint(&uvmexp.execpages);
		} else {
			atomic_dec_uint(&uvmexp.filepages);
		}
	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
		atomic_dec_uint(&uvmexp.anonpages);
	}

	/* object should be locked */
	uobj->uo_npages--;
	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
	pg->flags &= ~PG_TABLED;
	pg->uobject = NULL;
}

static inline void
uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg)
{

	KASSERT(uobj == pg->uobject);
	rb_tree_remove_node(&uobj->rb_tree, &pg->rb_node);
}

static inline void
uvm_pageremove(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;

	uvm_pageremove_tree(uobj, pg);
	uvm_pageremove_list(uobj, pg);
}

static void
uvm_page_init_buckets(struct pgfreelist *pgfl)
{
	int color, i;

	for (color = 0; color < uvmexp.ncolors; color++) {
		for (i = 0; i < PGFL_NQUEUES; i++) {
			LIST_INIT(&pgfl->pgfl_buckets[color].pgfl_queues[i]);
		}
	}
}

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
	vsize_t freepages, pagecount, bucketcount, n;
	struct pgflbucket *bucketarray, *cpuarray;
	struct vm_page *pagearray;
	int lcv;
	u_int i;
	paddr_t paddr;

	KASSERT(ncpu <= 1);
	KASSERT(sizeof(pagearray->offset) >= sizeof(struct uvm_cpu *));

	/*
	 * init the page queues and page queue locks, except the free
	 * list; we allocate that later (with the initial vm_page
	 * structures).
	 */

	curcpu()->ci_data.cpu_uvm = &uvm.cpus[0];
	uvmpdpol_init();
	mutex_init(&uvm_pageqlock, MUTEX_DRIVER, IPL_NONE);
	mutex_init(&uvm_fpageqlock, MUTEX_DRIVER, IPL_VM);

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * Let MD code initialize the number of colors, or default
	 * to 1 color if MD code doesn't care.
	 */
	if (uvmexp.ncolors == 0)
		uvmexp.ncolors = 1;
	uvmexp.colormask = uvmexp.ncolors - 1;

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	bucketcount = uvmexp.ncolors * VM_NFREELIST;
	pagecount = ((freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));

	bucketarray = (void *)uvm_pageboot_alloc((bucketcount *
	    sizeof(struct pgflbucket) * 2) + (pagecount *
	    sizeof(struct vm_page)));
	cpuarray = bucketarray + bucketcount;
	pagearray = (struct vm_page *)(bucketarray + bucketcount * 2);

	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		uvm.page_free[lcv].pgfl_buckets =
		    (bucketarray + (lcv * uvmexp.ncolors));
		uvm_page_init_buckets(&uvm.page_free[lcv]);
		uvm.cpus[0].page_free[lcv].pgfl_buckets =
		    (cpuarray + (lcv * uvmexp.ncolors));
		uvm_page_init_buckets(&uvm.cpus[0].page_free[lcv]);
	}
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		n = vm_physmem[lcv].end - vm_physmem[lcv].start;

		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
#endif
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);
#ifdef DEBUG
	/*
	 * steal kva for uvm_pagezerocheck().
	 */
	uvm_zerocheckkva = *kvm_startp;
	*kvm_startp += PAGE_SIZE;
#endif /* DEBUG */

	/*
	 * init various thresholds.
	 */

	uvmexp.reserve_pagedaemon = 1;
	uvmexp.reserve_kernel = 5;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.cpus[0].page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = true;
}
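
/*
 * Example bootstrap ordering (illustrative, not from the original
 * file): machine-dependent startup code is expected to have set the
 * page size and registered at least one RAM segment before uvm_init()
 * reaches this function, roughly:
 *
 *	uvm_setpagesize();		set uvmexp.pagesize and friends
 *	uvm_page_physload(...);		once per RAM segment
 *	uvm_init();			calls uvm_page_init() internally
 */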

/*
 * uvm_setpagesize: set the page size
 *
 * => sets uvmexp.pageshift and uvmexp.pagemask from uvmexp.pagesize.
 */

void
uvm_setpagesize(void)
{

	/*
	 * If uvmexp.pagesize is 0 at this point, we expect PAGE_SIZE
	 * to be a constant (indicated by being a non-zero value).
	 */
	if (uvmexp.pagesize == 0) {
		if (PAGE_SIZE == 0)
			panic("uvm_setpagesize: uvmexp.pagesize not set");
		uvmexp.pagesize = PAGE_SIZE;
	}
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}
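
/*
 * Worked example (illustrative): with uvmexp.pagesize = 4096,
 * uvmexp.pagemask becomes 0xfff, the power-of-two check passes
 * since (0xfff & 0x1000) == 0, and the loop terminates with
 * uvmexp.pageshift = 12 because 1 << 12 == 4096.
 */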

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
	static bool initialized = false;
	vaddr_t addr;
#if !defined(PMAP_STEAL_MEMORY)
	vaddr_t vaddr;
	paddr_t paddr;
#endif

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == false) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = true;
	}

	/* round to page size */
	size = round_page(size);

#if defined(PMAP_STEAL_MEMORY)

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should adjust
	 * virtual_space_start/virtual_space_end if necessary.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}
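
/*
 * Example use (illustrative, not from the original file): before
 * uvm_page_init() runs, MD bootstrap code can permanently steal
 * mapped kernel memory for early data structures, e.g.:
 *
 *	sometable = (void *)uvm_pageboot_alloc(ntables * sizeof(*sometable));
 *
 * "sometable" and "ntables" are hypothetical names; the pages backing
 * the returned range never enter the managed free page pool.
 */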

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
static bool uvm_page_physget_freelist(paddr_t *, int);

static bool
uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
{
	int lcv, x;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (uvm.page_init_done == true)
			panic("uvm_page_physget: called _after_ bootstrap");

		if (vm_physmem[lcv].free_list != freelist)
			continue;

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
				    panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (true);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
				    panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (true);
		}
	}

	/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (true);
	}

	return (false);        /* whoops! */
}

bool
uvm_page_physget(paddr_t *paddrp)
{
	int i;

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == true)
			return (true);
	return (false);
}
#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int free_list)
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");
	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d", free_list);
	if (start >= end)
		panic("uvm_page_physload: start >= end");

	/*
	 * do we have room?
	 */

	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */

	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;
		npages = end - start;  /* # of pages */
		pgs = malloc(sizeof(struct vm_page) * npages,
		    M_VMPAGE, M_NOWAIT);
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* zero data, init phys_addr and free_list, and free pages */
		memset(pgs, 0, sizeof(struct vm_page) * npages);
		for (lcv = 0, paddr = ptoa(start) ;
				 lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {
		pgs = NULL;
		npages = 0;
	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}
#else
	panic("uvm_page_physload: unknown physseg strategy selected!");
#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	if (!preload) {
		uvmpdpol_reinit();
	}
}
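
/*
 * Example (illustrative, not from the original file): since all
 * arguments are page frame numbers, MD bootstrap code converts byte
 * addresses with atop() when registering a RAM segment:
 *
 *	uvm_page_physload(atop(seg_start), atop(seg_end),
 *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
 *
 * seg_start/seg_end/avail_start/avail_end are hypothetical byte
 * addresses; the avail_* pair bounds the portion of the segment that
 * is handed to the free page pool.
 */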

/*
 * uvm_page_recolor: Recolor the pages if the new bucket count is
 * larger than the old one.
 */

void
uvm_page_recolor(int newncolors)
{
	struct pgflbucket *bucketarray, *cpuarray, *oldbucketarray;
	struct pgfreelist gpgfl, pgfl;
	struct vm_page *pg;
	vsize_t bucketcount;
	int lcv, color, i, ocolors;
	struct uvm_cpu *ucpu;

	if (newncolors <= uvmexp.ncolors)
		return;

	if (uvm.page_init_done == false) {
		uvmexp.ncolors = newncolors;
		return;
	}

	bucketcount = newncolors * VM_NFREELIST;
	bucketarray = malloc(bucketcount * sizeof(struct pgflbucket) * 2,
	    M_VMPAGE, M_NOWAIT);
	cpuarray = bucketarray + bucketcount;
	if (bucketarray == NULL) {
		printf("WARNING: unable to allocate %ld page color buckets\n",
		    (long) bucketcount);
		return;
	}

	mutex_spin_enter(&uvm_fpageqlock);

	/* Make sure we should still do this. */
	if (newncolors <= uvmexp.ncolors) {
		mutex_spin_exit(&uvm_fpageqlock);
		free(bucketarray, M_VMPAGE);
		return;
	}

	oldbucketarray = uvm.page_free[0].pgfl_buckets;
	ocolors = uvmexp.ncolors;

	uvmexp.ncolors = newncolors;
	uvmexp.colormask = uvmexp.ncolors - 1;

	ucpu = curcpu()->ci_data.cpu_uvm;
	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		gpgfl.pgfl_buckets = (bucketarray + (lcv * newncolors));
		pgfl.pgfl_buckets = (cpuarray + (lcv * uvmexp.ncolors));
		uvm_page_init_buckets(&gpgfl);
		uvm_page_init_buckets(&pgfl);
		for (color = 0; color < ocolors; color++) {
			for (i = 0; i < PGFL_NQUEUES; i++) {
				while ((pg = LIST_FIRST(&uvm.page_free[
				    lcv].pgfl_buckets[color].pgfl_queues[i]))
				    != NULL) {
					LIST_REMOVE(pg, pageq.list); /* global */
					LIST_REMOVE(pg, listq.list); /* cpu */
					LIST_INSERT_HEAD(&gpgfl.pgfl_buckets[
					    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
					    i], pg, pageq.list);
					LIST_INSERT_HEAD(&pgfl.pgfl_buckets[
					    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
					    i], pg, listq.list);
				}
			}
		}
		uvm.page_free[lcv].pgfl_buckets = gpgfl.pgfl_buckets;
		ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
	}

	if (have_recolored_pages) {
		mutex_spin_exit(&uvm_fpageqlock);
		free(oldbucketarray, M_VMPAGE);
		return;
	}

	have_recolored_pages = true;
	mutex_spin_exit(&uvm_fpageqlock);
}
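
/*
 * Example (illustrative, not from the original file): MD cache-probe
 * code can raise the number of page colors once the geometry of the
 * largest physically-indexed cache is known; one color per page of
 * cache "way size" is a plausible choice:
 *
 *	uvm_page_recolor(cache_size / (cache_assoc * PAGE_SIZE));
 *
 * cache_size and cache_assoc are hypothetical values.  As the checks
 * above show, requests that do not increase the color count are
 * simply ignored.
 */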

/*
 * uvm_cpu_attach: initialize per-CPU data structures.
 */

void
uvm_cpu_attach(struct cpu_info *ci)
{
	struct pgflbucket *bucketarray;
	struct pgfreelist pgfl;
	struct uvm_cpu *ucpu;
	vsize_t bucketcount;
	int lcv;

	if (CPU_IS_PRIMARY(ci)) {
		/* Already done in uvm_page_init(). */
		return;
	}

	bucketcount = uvmexp.ncolors * VM_NFREELIST;
	bucketarray = malloc(bucketcount * sizeof(struct pgflbucket),
	    M_VMPAGE, M_WAITOK);
	ucpu = &uvm.cpus[cpu_index(ci)];
	ci->ci_data.cpu_uvm = ucpu;
	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		pgfl.pgfl_buckets = (bucketarray + (lcv * uvmexp.ncolors));
		uvm_page_init_buckets(&pgfl);
		ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
	}
}
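
/*
 * Note (illustrative, an assumption rather than something stated in
 * this file): this is expected to be called once for each CPU as it
 * attaches, e.g. from MI CPU attachment code:
 *
 *	uvm_cpu_attach(ci);	for each struct cpu_info *ci
 *
 * so that every secondary CPU gets its own set of free-page buckets,
 * while the boot CPU keeps the set built in uvm_page_init().
 */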

/*
 * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat
 */

static struct vm_page *
uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int flist, int try1, int try2,
    int *trycolorp)
{
	struct pgflist *freeq;
	struct vm_page *pg;
	int color, trycolor = *trycolorp;
	struct pgfreelist *gpgfl, *pgfl;

	KASSERT(mutex_owned(&uvm_fpageqlock));

	color = trycolor;
	pgfl = &ucpu->page_free[flist];
	gpgfl = &uvm.page_free[flist];
	do {
		/* cpu, try1 */
		if ((pg = LIST_FIRST((freeq =
		    &pgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) {
			VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
			uvmexp.cpuhit++;
			goto gotit;
		}
		/* global, try1 */
		if ((pg = LIST_FIRST((freeq =
		    &gpgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) {
			VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
			uvmexp.cpumiss++;
			goto gotit;
		}
		/* cpu, try2 */
		if ((pg = LIST_FIRST((freeq =
		    &pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) {
			VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
			uvmexp.cpuhit++;
			goto gotit;
		}
		/* global, try2 */
		if ((pg = LIST_FIRST((freeq =
		    &gpgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) {
			VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
			uvmexp.cpumiss++;
			goto gotit;
		}
		color = (color + 1) & uvmexp.colormask;
	} while (color != trycolor);

	return (NULL);

 gotit:
	LIST_REMOVE(pg, pageq.list);	/* global list */
	LIST_REMOVE(pg, listq.list);	/* per-cpu list */
	uvmexp.free--;

	/* update zero'd page count */
	if (pg->flags & PG_ZERO)
		uvmexp.zeropages--;

	if (color == trycolor)
		uvmexp.colorhit++;
	else {
		uvmexp.colormiss++;
		*trycolorp = color;
	}

	return (pg);
}

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in obj's tree)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 * => policy decision: it is more important to pull a page off of the
 *	appropriate priority free list than it is to get a zero'd or
 *	unknown contents page.  This is because we live with the
 *	consequences of a bad free list decision for the entire
 *	lifetime of the page, e.g. if the page comes from memory that
 *	is slower to access.
 */

struct vm_page *
uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
    int flags, int strat, int free_list)
{
	int lcv, try1, try2, zeroit = 0, color;
	struct uvm_cpu *ucpu;
	struct vm_page *pg;
	bool use_reserve;

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(anon == NULL || off == 0);
	KASSERT(off == trunc_page(off));
	KASSERT(obj == NULL || mutex_owned(&obj->vmobjlock));
	KASSERT(anon == NULL || mutex_owned(&anon->an_lock));

	mutex_spin_enter(&uvm_fpageqlock);

	/*
	 * This implements a global round-robin page coloring
	 * algorithm.
	 *
	 * XXXJRT: What about virtually-indexed caches?
	 */

	ucpu = curcpu()->ci_data.cpu_uvm;
	color = ucpu->page_free_nextcolor;

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

	uvm_kick_pdaemon();

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
		(obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	     !(use_reserve && curlwp == uvm.pagedaemon_lwp)))
		goto fail;

#if PGFL_NQUEUES != 2
#error uvm_pagealloc_strat needs to be updated
#endif

	/*
	 * If we want a zero'd page, try the ZEROS queue first, otherwise
	 * we try the UNKNOWN queue first.
	 */
	if (flags & UVM_PGA_ZERO) {
		try1 = PGFL_ZEROS;
		try2 = PGFL_UNKNOWN;
	} else {
		try1 = PGFL_UNKNOWN;
		try2 = PGFL_ZEROS;
	}

 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check all freelists in descending priority order. */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			pg = uvm_pagealloc_pgfl(ucpu, lcv,
			    try1, try2, &color);
			if (pg != NULL)
				goto gotit;
		}

		/* No pages free! */
		goto fail;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
		pg = uvm_pagealloc_pgfl(ucpu, free_list,
		    try1, try2, &color);
		if (pg != NULL)
			goto gotit;

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free! */
		goto fail;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	/*
	 * We now know which color we actually allocated from; set
	 * the next color accordingly.
	 */

	ucpu->page_free_nextcolor = (color + 1) & uvmexp.colormask;

	/*
	 * update allocation statistics and remember if we have to
	 * zero the page
	 */

	if (flags & UVM_PGA_ZERO) {
   1175   1.34   thorpej 		if (pg->flags & PG_ZERO) {
   1176   1.34   thorpej 			uvmexp.pga_zerohit++;
   1177   1.34   thorpej 			zeroit = 0;
   1178   1.34   thorpej 		} else {
   1179   1.34   thorpej 			uvmexp.pga_zeromiss++;
   1180   1.34   thorpej 			zeroit = 1;
   1181   1.34   thorpej 		}
   1182  1.133        ad 		if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
   1183  1.133        ad 			ucpu->page_idle_zero = vm_page_zero_enable;
   1184  1.133        ad 		}
   1185   1.34   thorpej 	}
   1186  1.123        ad 	mutex_spin_exit(&uvm_fpageqlock);
   1187    1.7       mrg 
   1188    1.7       mrg 	pg->offset = off;
   1189    1.7       mrg 	pg->uobject = obj;
   1190    1.7       mrg 	pg->uanon = anon;
   1191    1.7       mrg 	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
   1192    1.7       mrg 	if (anon) {
   1193  1.103      yamt 		anon->an_page = pg;
   1194    1.7       mrg 		pg->pqflags = PQ_ANON;
   1195  1.126        ad 		atomic_inc_uint(&uvmexp.anonpages);
   1196    1.7       mrg 	} else {
   1197   1.67       chs 		if (obj) {
   1198    1.7       mrg 			uvm_pageinsert(pg);
   1199   1.67       chs 		}
   1200    1.7       mrg 		pg->pqflags = 0;
   1201    1.7       mrg 	}
   1202    1.1       mrg #if defined(UVM_PAGE_TRKOWN)
   1203    1.7       mrg 	pg->owner_tag = NULL;
   1204    1.1       mrg #endif
   1205    1.7       mrg 	UVM_PAGE_OWN(pg, "new alloc");
   1206   1.33   thorpej 
   1207   1.33   thorpej 	if (flags & UVM_PGA_ZERO) {
   1208   1.33   thorpej 		/*
    1209   1.34   thorpej 		 * A zero'd page is not clean (it no longer matches its backing
    1210   1.34   thorpej 		 * store).  If we got a page not already zero'd, zero it now.
   1211   1.33   thorpej 		 */
   1212   1.33   thorpej 		pg->flags &= ~PG_CLEAN;
   1213   1.34   thorpej 		if (zeroit)
   1214   1.34   thorpej 			pmap_zero_page(VM_PAGE_TO_PHYS(pg));
   1215   1.33   thorpej 	}
   1216    1.1       mrg 
    1217    1.7       mrg 	return (pg);
   1218   1.12   thorpej 
   1219   1.12   thorpej  fail:
   1220  1.123        ad 	mutex_spin_exit(&uvm_fpageqlock);
   1221   1.12   thorpej 	return (NULL);
   1222    1.1       mrg }
   1223    1.1       mrg 
   1224    1.1       mrg /*
   1225   1.96      yamt  * uvm_pagereplace: replace a page with another
   1226   1.96      yamt  *
   1227   1.96      yamt  * => object must be locked
   1228   1.96      yamt  */
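                          /*
                           * Illustrative sketch (assumed caller, not from this file): swap
                           * newpg in for oldpg at the same offset, e.g. when a page with more
                           * suitable contents or placement has been allocated.  Both pages are
                           * expected to be busy and the object locked throughout.
                           *
                           *	mutex_enter(&uobj->vmobjlock);
                           *	uvm_pagereplace(oldpg, newpg);
                           *	mutex_exit(&uobj->vmobjlock);
                           *
                           * afterwards newpg carries oldpg's identity in the object and the
                           * caller is responsible for disposing of oldpg.
                           */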
   1229   1.96      yamt 
   1230   1.96      yamt void
   1231  1.105   thorpej uvm_pagereplace(struct vm_page *oldpg, struct vm_page *newpg)
   1232   1.96      yamt {
   1233  1.136      yamt 	struct uvm_object *uobj = oldpg->uobject;
   1234   1.97  junyoung 
   1235   1.96      yamt 	KASSERT((oldpg->flags & PG_TABLED) != 0);
   1236  1.136      yamt 	KASSERT(uobj != NULL);
   1237   1.96      yamt 	KASSERT((newpg->flags & PG_TABLED) == 0);
   1238   1.96      yamt 	KASSERT(newpg->uobject == NULL);
   1239  1.136      yamt 	KASSERT(mutex_owned(&uobj->vmobjlock));
   1240   1.96      yamt 
   1241  1.136      yamt 	newpg->uobject = uobj;
   1242   1.96      yamt 	newpg->offset = oldpg->offset;
   1243   1.96      yamt 
   1244  1.136      yamt 	uvm_pageremove_tree(uobj, oldpg);
   1245  1.136      yamt 	uvm_pageinsert_tree(uobj, newpg);
   1246  1.136      yamt 	uvm_pageinsert_list(uobj, newpg, oldpg);
   1247  1.136      yamt 	uvm_pageremove_list(uobj, oldpg);
   1248   1.96      yamt }
   1249   1.96      yamt 
   1250   1.96      yamt /*
   1251    1.1       mrg  * uvm_pagerealloc: reallocate a page from one object to another
   1252    1.1       mrg  *
   1253    1.1       mrg  * => both objects must be locked
   1254    1.1       mrg  */
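                          /*
                           * Illustrative sketch (assumed, not from this file): move a page to
                           * a new object/offset; lock ordering between the two objects is the
                           * caller's responsibility.
                           *
                           *	mutex_enter(&oldobj->vmobjlock);
                           *	mutex_enter(&newobj->vmobjlock);
                           *	uvm_pagerealloc(pg, newobj, newoff);
                           *	mutex_exit(&newobj->vmobjlock);
                           *	mutex_exit(&oldobj->vmobjlock);
                           */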
   1255    1.1       mrg 
   1256    1.7       mrg void
   1257  1.105   thorpej uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
   1258    1.1       mrg {
   1259    1.7       mrg 	/*
   1260    1.7       mrg 	 * remove it from the old object
   1261    1.7       mrg 	 */
   1262    1.7       mrg 
   1263    1.7       mrg 	if (pg->uobject) {
   1264    1.7       mrg 		uvm_pageremove(pg);
   1265    1.7       mrg 	}
   1266    1.7       mrg 
   1267    1.7       mrg 	/*
   1268    1.7       mrg 	 * put it in the new object
   1269    1.7       mrg 	 */
   1270    1.7       mrg 
   1271    1.7       mrg 	if (newobj) {
   1272    1.7       mrg 		pg->uobject = newobj;
   1273    1.7       mrg 		pg->offset = newoff;
   1274    1.7       mrg 		uvm_pageinsert(pg);
   1275    1.7       mrg 	}
   1276    1.1       mrg }
   1277    1.1       mrg 
   1278   1.91      yamt #ifdef DEBUG
   1279   1.91      yamt /*
   1280   1.91      yamt  * check if page is zero-filled
   1281   1.91      yamt  *
    1282   1.91      yamt  * => called with free page queue lock held.
   1283   1.91      yamt  */
   1284   1.91      yamt void
   1285   1.91      yamt uvm_pagezerocheck(struct vm_page *pg)
   1286   1.91      yamt {
   1287   1.91      yamt 	int *p, *ep;
   1288   1.91      yamt 
   1289   1.91      yamt 	KASSERT(uvm_zerocheckkva != 0);
   1290  1.123        ad 	KASSERT(mutex_owned(&uvm_fpageqlock));
   1291   1.91      yamt 
   1292   1.91      yamt 	/*
   1293   1.91      yamt 	 * XXX assuming pmap_kenter_pa and pmap_kremove never call
   1294   1.91      yamt 	 * uvm page allocator.
   1295   1.91      yamt 	 *
    1296   1.95       wiz 	 * it might be better to have a "CPU-local temporary map" pmap interface.
   1297   1.91      yamt 	 */
   1298   1.91      yamt 	pmap_kenter_pa(uvm_zerocheckkva, VM_PAGE_TO_PHYS(pg), VM_PROT_READ);
   1299   1.91      yamt 	p = (int *)uvm_zerocheckkva;
   1300   1.91      yamt 	ep = (int *)((char *)p + PAGE_SIZE);
   1301   1.92      yamt 	pmap_update(pmap_kernel());
   1302   1.91      yamt 	while (p < ep) {
   1303   1.91      yamt 		if (*p != 0)
   1304   1.91      yamt 			panic("PG_ZERO page isn't zero-filled");
   1305   1.91      yamt 		p++;
   1306   1.91      yamt 	}
   1307   1.91      yamt 	pmap_kremove(uvm_zerocheckkva, PAGE_SIZE);
   1308  1.131      yamt 	/*
   1309  1.131      yamt 	 * pmap_update() is not necessary here because no one except us
   1310  1.131      yamt 	 * uses this VA.
   1311  1.131      yamt 	 */
   1312   1.91      yamt }
   1313   1.91      yamt #endif /* DEBUG */
   1314   1.91      yamt 
   1315    1.1       mrg /*
   1316    1.1       mrg  * uvm_pagefree: free page
   1317    1.1       mrg  *
   1318  1.133        ad  * => erase page's identity (i.e. remove from object)
   1319    1.1       mrg  * => put page on free list
   1320    1.1       mrg  * => caller must lock owning object (either anon or uvm_object)
   1321    1.1       mrg  * => caller must lock page queues
   1322    1.1       mrg  * => assumes all valid mappings of pg are gone
   1323    1.1       mrg  */
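                          /*
                           * Illustrative sketch of a typical call sequence (assumed, not from
                           * this file): remove all mappings first, then free with the owning
                           * object and the page queues locked.
                           *
                           *	mutex_enter(&uobj->vmobjlock);
                           *	mutex_enter(&uvm_pageqlock);
                           *	pmap_page_protect(pg, VM_PROT_NONE);
                           *	uvm_pagefree(pg);
                           *	mutex_exit(&uvm_pageqlock);
                           *	mutex_exit(&uobj->vmobjlock);
                           */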
   1324    1.1       mrg 
   1325   1.44       chs void
   1326  1.105   thorpej uvm_pagefree(struct vm_page *pg)
   1327    1.1       mrg {
   1328  1.133        ad 	struct pgflist *pgfl;
   1329  1.133        ad 	struct uvm_cpu *ucpu;
   1330  1.133        ad 	int index, color, queue;
   1331  1.118   thorpej 	bool iszero;
   1332   1.67       chs 
   1333   1.44       chs #ifdef DEBUG
   1334   1.44       chs 	if (pg->uobject == (void *)0xdeadbeef &&
   1335   1.44       chs 	    pg->uanon == (void *)0xdeadbeef) {
   1336   1.79    provos 		panic("uvm_pagefree: freeing free page %p", pg);
   1337   1.44       chs 	}
   1338   1.91      yamt #endif /* DEBUG */
   1339   1.44       chs 
   1340  1.123        ad 	KASSERT((pg->flags & PG_PAGEOUT) == 0);
   1341  1.128      yamt 	KASSERT(mutex_owned(&uvm_pageqlock) || !uvmpdpol_pageisqueued_p(pg));
   1342  1.128      yamt 	KASSERT(pg->uobject == NULL || mutex_owned(&pg->uobject->vmobjlock));
   1343  1.127        ad 	KASSERT(pg->uobject != NULL || pg->uanon == NULL ||
   1344  1.127        ad 		mutex_owned(&pg->uanon->an_lock));
   1345  1.123        ad 
   1346    1.7       mrg 	/*
   1347   1.67       chs 	 * if the page is loaned, resolve the loan instead of freeing.
   1348    1.7       mrg 	 */
   1349    1.7       mrg 
   1350   1.67       chs 	if (pg->loan_count) {
   1351   1.70       chs 		KASSERT(pg->wire_count == 0);
   1352    1.7       mrg 
   1353    1.7       mrg 		/*
   1354   1.67       chs 		 * if the page is owned by an anon then we just want to
   1355   1.70       chs 		 * drop anon ownership.  the kernel will free the page when
   1356   1.70       chs 		 * it is done with it.  if the page is owned by an object,
   1357   1.70       chs 		 * remove it from the object and mark it dirty for the benefit
   1358   1.70       chs 		 * of possible anon owners.
   1359   1.70       chs 		 *
   1360   1.70       chs 		 * regardless of previous ownership, wakeup any waiters,
   1361   1.70       chs 		 * unbusy the page, and we're done.
   1362    1.7       mrg 		 */
   1363    1.7       mrg 
   1364   1.73       chs 		if (pg->uobject != NULL) {
   1365   1.70       chs 			uvm_pageremove(pg);
   1366   1.67       chs 			pg->flags &= ~PG_CLEAN;
   1367   1.73       chs 		} else if (pg->uanon != NULL) {
   1368   1.73       chs 			if ((pg->pqflags & PQ_ANON) == 0) {
   1369   1.73       chs 				pg->loan_count--;
   1370   1.73       chs 			} else {
   1371   1.73       chs 				pg->pqflags &= ~PQ_ANON;
   1372  1.126        ad 				atomic_dec_uint(&uvmexp.anonpages);
   1373   1.73       chs 			}
   1374  1.103      yamt 			pg->uanon->an_page = NULL;
   1375   1.73       chs 			pg->uanon = NULL;
   1376   1.67       chs 		}
   1377   1.70       chs 		if (pg->flags & PG_WANTED) {
   1378   1.70       chs 			wakeup(pg);
   1379   1.70       chs 		}
   1380   1.84  perseant 		pg->flags &= ~(PG_WANTED|PG_BUSY|PG_RELEASED|PG_PAGER1);
   1381   1.70       chs #ifdef UVM_PAGE_TRKOWN
   1382   1.70       chs 		pg->owner_tag = NULL;
   1383   1.70       chs #endif
   1384   1.73       chs 		if (pg->loan_count) {
   1385  1.115      yamt 			KASSERT(pg->uobject == NULL);
   1386  1.115      yamt 			if (pg->uanon == NULL) {
   1387  1.115      yamt 				uvm_pagedequeue(pg);
   1388  1.115      yamt 			}
   1389   1.73       chs 			return;
   1390   1.73       chs 		}
   1391   1.67       chs 	}
   1392   1.62       chs 
   1393   1.67       chs 	/*
   1394   1.67       chs 	 * remove page from its object or anon.
   1395   1.67       chs 	 */
   1396   1.44       chs 
   1397   1.73       chs 	if (pg->uobject != NULL) {
   1398   1.67       chs 		uvm_pageremove(pg);
   1399   1.73       chs 	} else if (pg->uanon != NULL) {
   1400  1.103      yamt 		pg->uanon->an_page = NULL;
   1401  1.126        ad 		atomic_dec_uint(&uvmexp.anonpages);
   1402    1.7       mrg 	}
   1403    1.1       mrg 
   1404    1.7       mrg 	/*
   1405   1.70       chs 	 * now remove the page from the queues.
   1406    1.7       mrg 	 */
   1407    1.7       mrg 
   1408   1.67       chs 	uvm_pagedequeue(pg);
   1409    1.7       mrg 
   1410    1.7       mrg 	/*
   1411    1.7       mrg 	 * if the page was wired, unwire it now.
   1412    1.7       mrg 	 */
   1413   1.44       chs 
   1414   1.34   thorpej 	if (pg->wire_count) {
   1415    1.7       mrg 		pg->wire_count = 0;
   1416    1.7       mrg 		uvmexp.wired--;
   1417   1.44       chs 	}
   1418    1.7       mrg 
   1419    1.7       mrg 	/*
   1420   1.44       chs 	 * and put on free queue
   1421    1.7       mrg 	 */
   1422    1.7       mrg 
   1423   1.90      yamt 	iszero = (pg->flags & PG_ZERO);
   1424  1.133        ad 	index = uvm_page_lookup_freelist(pg);
   1425  1.133        ad 	color = VM_PGCOLOR_BUCKET(pg);
   1426  1.133        ad 	queue = (iszero ? PGFL_ZEROS : PGFL_UNKNOWN);
   1427   1.34   thorpej 
   1428    1.7       mrg 	pg->pqflags = PQ_FREE;
   1429    1.3       chs #ifdef DEBUG
   1430    1.7       mrg 	pg->uobject = (void *)0xdeadbeef;
   1431    1.7       mrg 	pg->uanon = (void *)0xdeadbeef;
   1432    1.3       chs #endif
   1433   1.90      yamt 
   1434  1.123        ad 	mutex_spin_enter(&uvm_fpageqlock);
   1435   1.91      yamt 
   1436   1.91      yamt #ifdef DEBUG
   1437   1.91      yamt 	if (iszero)
   1438   1.91      yamt 		uvm_pagezerocheck(pg);
   1439   1.91      yamt #endif /* DEBUG */
    1440   1.91      yamt 
   1442  1.133        ad 	/* global list */
   1443  1.133        ad 	pgfl = &uvm.page_free[index].pgfl_buckets[color].pgfl_queues[queue];
   1444  1.133        ad 	LIST_INSERT_HEAD(pgfl, pg, pageq.list);
   1445    1.7       mrg 	uvmexp.free++;
   1446  1.133        ad 	if (iszero) {
   1447   1.90      yamt 		uvmexp.zeropages++;
   1448  1.133        ad 	}
   1449   1.34   thorpej 
   1450  1.133        ad 	/* per-cpu list */
   1451  1.133        ad 	ucpu = curcpu()->ci_data.cpu_uvm;
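                          	/* remember which cpu freed the page; a free page's offset is unused */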
   1452  1.133        ad 	pg->offset = (uintptr_t)ucpu;
   1453  1.133        ad 	pgfl = &ucpu->page_free[index].pgfl_buckets[color].pgfl_queues[queue];
   1454  1.133        ad 	LIST_INSERT_HEAD(pgfl, pg, listq.list);
   1455  1.133        ad 	ucpu->pages[queue]++;
   1456  1.133        ad 	if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
   1457  1.133        ad 		ucpu->page_idle_zero = vm_page_zero_enable;
   1458  1.133        ad 	}
   1459   1.34   thorpej 
   1460  1.123        ad 	mutex_spin_exit(&uvm_fpageqlock);
   1461   1.44       chs }
   1462   1.44       chs 
   1463   1.44       chs /*
   1464   1.44       chs  * uvm_page_unbusy: unbusy an array of pages.
   1465   1.44       chs  *
   1466   1.44       chs  * => pages must either all belong to the same object, or all belong to anons.
   1467   1.44       chs  * => if pages are object-owned, object must be locked.
   1468   1.67       chs  * => if pages are anon-owned, anons must be locked.
   1469   1.76     enami  * => caller must lock page queues if pages may be released.
   1470   1.98      yamt  * => caller must make sure that anon-owned pages are not PG_RELEASED.
   1471   1.44       chs  */
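                          /*
                           * Illustrative sketch (assumed): unbusy a cluster of pages once I/O
                           * on them completes; the page queue lock is held here because some
                           * pages may be PG_RELEASED and would be freed.
                           *
                           *	mutex_enter(&uobj->vmobjlock);
                           *	mutex_enter(&uvm_pageqlock);
                           *	uvm_page_unbusy(pgs, npages);
                           *	mutex_exit(&uvm_pageqlock);
                           *	mutex_exit(&uobj->vmobjlock);
                           */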
   1472   1.44       chs 
   1473   1.44       chs void
   1474  1.105   thorpej uvm_page_unbusy(struct vm_page **pgs, int npgs)
   1475   1.44       chs {
   1476   1.44       chs 	struct vm_page *pg;
   1477   1.44       chs 	int i;
   1478   1.44       chs 	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);
   1479   1.44       chs 
   1480   1.44       chs 	for (i = 0; i < npgs; i++) {
   1481   1.44       chs 		pg = pgs[i];
   1482   1.82     enami 		if (pg == NULL || pg == PGO_DONTCARE) {
   1483   1.44       chs 			continue;
   1484   1.44       chs 		}
   1485   1.98      yamt 
   1486  1.127        ad 		KASSERT(pg->uobject == NULL ||
   1487  1.127        ad 		    mutex_owned(&pg->uobject->vmobjlock));
   1488  1.127        ad 		KASSERT(pg->uobject != NULL ||
   1489  1.128      yamt 		    (pg->uanon != NULL && mutex_owned(&pg->uanon->an_lock)));
   1490   1.98      yamt 
   1491   1.98      yamt 		KASSERT(pg->flags & PG_BUSY);
   1492   1.98      yamt 		KASSERT((pg->flags & PG_PAGEOUT) == 0);
   1493   1.44       chs 		if (pg->flags & PG_WANTED) {
   1494   1.44       chs 			wakeup(pg);
   1495   1.44       chs 		}
   1496   1.44       chs 		if (pg->flags & PG_RELEASED) {
   1497   1.44       chs 			UVMHIST_LOG(ubchist, "releasing pg %p", pg,0,0,0);
   1498   1.98      yamt 			KASSERT(pg->uobject != NULL ||
   1499   1.98      yamt 			    (pg->uanon != NULL && pg->uanon->an_ref > 0));
   1500   1.67       chs 			pg->flags &= ~PG_RELEASED;
   1501   1.67       chs 			uvm_pagefree(pg);
   1502   1.44       chs 		} else {
   1503   1.44       chs 			UVMHIST_LOG(ubchist, "unbusying pg %p", pg,0,0,0);
   1504   1.44       chs 			pg->flags &= ~(PG_WANTED|PG_BUSY);
   1505   1.44       chs 			UVM_PAGE_OWN(pg, NULL);
   1506   1.44       chs 		}
   1507   1.44       chs 	}
   1508    1.1       mrg }
   1509    1.1       mrg 
   1510    1.1       mrg #if defined(UVM_PAGE_TRKOWN)
   1511    1.1       mrg /*
   1512    1.1       mrg  * uvm_page_own: set or release page ownership
   1513    1.1       mrg  *
   1514    1.1       mrg  * => this is a debugging function that keeps track of who sets PG_BUSY
   1515    1.1       mrg  *	and where they do it.   it can be used to track down problems
    1516    1.1       mrg  *	such as a process setting "PG_BUSY" and never releasing it.
   1517    1.1       mrg  * => page's object [if any] must be locked
   1518    1.1       mrg  * => if "tag" is NULL then we are releasing page ownership
   1519    1.1       mrg  */
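                          /*
                           * Illustrative sketch (assumed; the "mydriver" tag is hypothetical).
                           * Callers normally go through the UVM_PAGE_OWN() macro, which expands
                           * to nothing unless the kernel is built with UVM_PAGE_TRKOWN.
                           *
                           *	pg->flags |= PG_BUSY;
                           *	UVM_PAGE_OWN(pg, "mydriver");
                           *
                           *	pg->flags &= ~PG_BUSY;
                           *	UVM_PAGE_OWN(pg, NULL);
                           */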
   1520    1.7       mrg void
   1521  1.105   thorpej uvm_page_own(struct vm_page *pg, const char *tag)
   1522    1.1       mrg {
   1523  1.112      yamt 	struct uvm_object *uobj;
   1524  1.112      yamt 	struct vm_anon *anon;
   1525  1.112      yamt 
   1526   1.67       chs 	KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
   1527   1.67       chs 
   1528  1.112      yamt 	uobj = pg->uobject;
   1529  1.112      yamt 	anon = pg->uanon;
   1530  1.112      yamt 	if (uobj != NULL) {
   1531  1.127        ad 		KASSERT(mutex_owned(&uobj->vmobjlock));
   1532  1.112      yamt 	} else if (anon != NULL) {
   1533  1.127        ad 		KASSERT(mutex_owned(&anon->an_lock));
   1534  1.112      yamt 	}
   1535  1.112      yamt 
   1536  1.112      yamt 	KASSERT((pg->flags & PG_WANTED) == 0);
   1537  1.112      yamt 
   1538    1.7       mrg 	/* gain ownership? */
   1539    1.7       mrg 	if (tag) {
   1540  1.112      yamt 		KASSERT((pg->flags & PG_BUSY) != 0);
   1541    1.7       mrg 		if (pg->owner_tag) {
   1542    1.7       mrg 			printf("uvm_page_own: page %p already owned "
   1543    1.7       mrg 			    "by proc %d [%s]\n", pg,
   1544   1.74     enami 			    pg->owner, pg->owner_tag);
   1545    1.7       mrg 			panic("uvm_page_own");
   1546    1.7       mrg 		}
    1547    1.7       mrg 		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
    1548  1.120  perseant 		pg->lowner = (curlwp) ? curlwp->l_lid : (lwpid_t) -1;
   1549    1.7       mrg 		pg->owner_tag = tag;
   1550    1.7       mrg 		return;
   1551    1.7       mrg 	}
   1552    1.7       mrg 
   1553    1.7       mrg 	/* drop ownership */
   1554  1.112      yamt 	KASSERT((pg->flags & PG_BUSY) == 0);
   1555    1.7       mrg 	if (pg->owner_tag == NULL) {
    1556    1.7       mrg 		printf("uvm_page_own: dropping ownership of a non-owned "
   1557    1.7       mrg 		    "page (%p)\n", pg);
   1558    1.7       mrg 		panic("uvm_page_own");
   1559    1.7       mrg 	}
   1560  1.115      yamt 	if (!uvmpdpol_pageisqueued_p(pg)) {
   1561  1.115      yamt 		KASSERT((pg->uanon == NULL && pg->uobject == NULL) ||
   1562  1.115      yamt 		    pg->wire_count > 0);
   1563  1.115      yamt 	} else {
   1564  1.115      yamt 		KASSERT(pg->wire_count == 0);
   1565  1.115      yamt 	}
   1566    1.7       mrg 	pg->owner_tag = NULL;
   1567    1.1       mrg }
   1568    1.1       mrg #endif
   1569   1.34   thorpej 
   1570   1.34   thorpej /*
   1571   1.34   thorpej  * uvm_pageidlezero: zero free pages while the system is idle.
   1572   1.34   thorpej  *
   1573   1.54   thorpej  * => try to complete one color bucket at a time, to reduce our impact
   1574   1.54   thorpej  *	on the CPU cache.
    1575  1.132        ad  * => we loop until we either run out of pages to zero, an lwp becomes
    1576  1.132        ad  *      ready to run, or MD code detects a reason to break early.
   1577   1.34   thorpej  */
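                          /*
                           * Illustrative sketch (assumed): the expected caller is the MD or
                           * idle-lwp loop, along the lines of:
                           *
                           *	if (!sched_curcpu_runnable_p())
                           *		uvm_pageidlezero();
                           */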
   1578   1.34   thorpej void
   1579  1.105   thorpej uvm_pageidlezero(void)
   1580   1.34   thorpej {
   1581   1.34   thorpej 	struct vm_page *pg;
   1582  1.133        ad 	struct pgfreelist *pgfl, *gpgfl;
   1583  1.133        ad 	struct uvm_cpu *ucpu;
   1584  1.133        ad 	int free_list, firstbucket, nextbucket;
   1585  1.133        ad 
   1586  1.133        ad 	ucpu = curcpu()->ci_data.cpu_uvm;
   1587  1.133        ad 	if (!ucpu->page_idle_zero ||
   1588  1.133        ad 	    ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) {
    1589  1.133        ad 		ucpu->page_idle_zero = false;
   1590  1.132        ad 		return;
   1591  1.132        ad 	}
    1592  1.133        ad 	mutex_spin_enter(&uvm_fpageqlock);
   1593  1.133        ad 	firstbucket = ucpu->page_free_nextcolor;
   1594  1.133        ad 	nextbucket = firstbucket;
   1595   1.58     enami 	do {
   1596  1.121      yamt 		if (sched_curcpu_runnable_p()) {
   1597  1.133        ad 			break;
   1598   1.34   thorpej 		}
   1599   1.54   thorpej 		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
   1600  1.133        ad 			pgfl = &ucpu->page_free[free_list];
   1601  1.133        ad 			gpgfl = &uvm.page_free[free_list];
   1602  1.133        ad 			while ((pg = LIST_FIRST(&pgfl->pgfl_buckets[
   1603   1.54   thorpej 			    nextbucket].pgfl_queues[PGFL_UNKNOWN])) != NULL) {
   1604  1.132        ad 				if (sched_curcpu_runnable_p()) {
   1605  1.101      yamt 					goto quit;
   1606  1.132        ad 				}
   1607  1.133        ad 				LIST_REMOVE(pg, pageq.list); /* global list */
   1608  1.133        ad 				LIST_REMOVE(pg, listq.list); /* per-cpu list */
   1609  1.133        ad 				ucpu->pages[PGFL_UNKNOWN]--;
   1610   1.54   thorpej 				uvmexp.free--;
   1611  1.123        ad 				mutex_spin_exit(&uvm_fpageqlock);
   1612   1.34   thorpej #ifdef PMAP_PAGEIDLEZERO
   1613   1.67       chs 				if (!PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg))) {
   1614   1.67       chs 
   1615   1.54   thorpej 					/*
   1616   1.54   thorpej 					 * The machine-dependent code detected
   1617   1.54   thorpej 					 * some reason for us to abort zeroing
   1618   1.54   thorpej 					 * pages, probably because there is a
   1619   1.54   thorpej 					 * process now ready to run.
   1620   1.54   thorpej 					 */
   1621   1.67       chs 
   1622  1.123        ad 					mutex_spin_enter(&uvm_fpageqlock);
   1623  1.133        ad 					LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[
   1624  1.133        ad 					    nextbucket].pgfl_queues[
   1625  1.133        ad 					    PGFL_UNKNOWN], pg, pageq.list);
   1626  1.133        ad 					LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
   1627   1.54   thorpej 					    nextbucket].pgfl_queues[
   1628  1.133        ad 					    PGFL_UNKNOWN], pg, listq.list);
   1629  1.133        ad 					ucpu->pages[PGFL_UNKNOWN]++;
   1630   1.54   thorpej 					uvmexp.free++;
   1631   1.54   thorpej 					uvmexp.zeroaborts++;
   1632  1.101      yamt 					goto quit;
   1633   1.54   thorpej 				}
   1634   1.54   thorpej #else
   1635   1.54   thorpej 				pmap_zero_page(VM_PAGE_TO_PHYS(pg));
   1636   1.54   thorpej #endif /* PMAP_PAGEIDLEZERO */
   1637   1.54   thorpej 				pg->flags |= PG_ZERO;
   1638   1.54   thorpej 
   1639  1.123        ad 				mutex_spin_enter(&uvm_fpageqlock);
   1640  1.133        ad 				LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[
   1641  1.133        ad 				    nextbucket].pgfl_queues[PGFL_ZEROS],
   1642  1.133        ad 				    pg, pageq.list);
   1643  1.133        ad 				LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
   1644   1.54   thorpej 				    nextbucket].pgfl_queues[PGFL_ZEROS],
   1645  1.133        ad 				    pg, listq.list);
   1646  1.133        ad 				ucpu->pages[PGFL_ZEROS]++;
   1647   1.54   thorpej 				uvmexp.free++;
   1648   1.54   thorpej 				uvmexp.zeropages++;
   1649   1.54   thorpej 			}
   1650   1.41   thorpej 		}
   1651  1.133        ad 		if (ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) {
   1652  1.133        ad 			break;
   1653  1.133        ad 		}
   1654   1.60   thorpej 		nextbucket = (nextbucket + 1) & uvmexp.colormask;
   1655   1.58     enami 	} while (nextbucket != firstbucket);
   1656  1.133        ad 	ucpu->page_idle_zero = false;
   1657  1.133        ad  quit:
   1658  1.123        ad 	mutex_spin_exit(&uvm_fpageqlock);
   1659   1.34   thorpej }
   1660  1.110      yamt 
   1661  1.110      yamt /*
   1662  1.110      yamt  * uvm_pagelookup: look up a page
   1663  1.110      yamt  *
   1664  1.110      yamt  * => caller should lock object to keep someone from pulling the page
   1665  1.110      yamt  *	out from under it
   1666  1.110      yamt  */
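                          /*
                           * Illustrative sketch (assumed):
                           *
                           *	mutex_enter(&uobj->vmobjlock);
                           *	pg = uvm_pagelookup(uobj, trunc_page(off));
                           *	if (pg != NULL && (pg->flags & PG_BUSY) == 0)
                           *		... use the page, object still locked ...
                           *	mutex_exit(&uobj->vmobjlock);
                           */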
   1667  1.110      yamt 
   1668  1.110      yamt struct vm_page *
   1669  1.110      yamt uvm_pagelookup(struct uvm_object *obj, voff_t off)
   1670  1.110      yamt {
   1671  1.110      yamt 	struct vm_page *pg;
   1672  1.110      yamt 
   1673  1.127        ad 	KASSERT(mutex_owned(&obj->vmobjlock));
   1674  1.123        ad 
   1675  1.134        ad 	pg = (struct vm_page *)rb_tree_find_node(&obj->rb_tree, &off);
   1676  1.134        ad 
   1677  1.110      yamt 	KASSERT(pg == NULL || obj->uo_npages != 0);
   1678  1.110      yamt 	KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1679  1.110      yamt 		(pg->flags & PG_BUSY) != 0);
    1680  1.110      yamt 	return (pg);
   1681  1.110      yamt }
   1682  1.110      yamt 
   1683  1.110      yamt /*
   1684  1.110      yamt  * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
   1685  1.110      yamt  *
   1686  1.110      yamt  * => caller must lock page queues
   1687  1.110      yamt  */
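                          /*
                           * Illustrative sketch (assumed): wire a page around an operation
                           * that must not have it paged out; see uvm_pageunwire() below for
                           * the release side.
                           *
                           *	mutex_enter(&uvm_pageqlock);
                           *	uvm_pagewire(pg);
                           *	mutex_exit(&uvm_pageqlock);
                           */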
   1688  1.110      yamt 
   1689  1.110      yamt void
   1690  1.110      yamt uvm_pagewire(struct vm_page *pg)
   1691  1.110      yamt {
   1692  1.127        ad 	KASSERT(mutex_owned(&uvm_pageqlock));
   1693  1.113      yamt #if defined(READAHEAD_STATS)
   1694  1.113      yamt 	if ((pg->pqflags & PQ_READAHEAD) != 0) {
   1695  1.113      yamt 		uvm_ra_hit.ev_count++;
   1696  1.113      yamt 		pg->pqflags &= ~PQ_READAHEAD;
   1697  1.113      yamt 	}
   1698  1.113      yamt #endif /* defined(READAHEAD_STATS) */
   1699  1.110      yamt 	if (pg->wire_count == 0) {
   1700  1.110      yamt 		uvm_pagedequeue(pg);
   1701  1.110      yamt 		uvmexp.wired++;
   1702  1.110      yamt 	}
   1703  1.110      yamt 	pg->wire_count++;
   1704  1.110      yamt }
   1705  1.110      yamt 
   1706  1.110      yamt /*
   1707  1.110      yamt  * uvm_pageunwire: unwire the page.
   1708  1.110      yamt  *
   1709  1.110      yamt  * => activate if wire count goes to zero.
   1710  1.110      yamt  * => caller must lock page queues
   1711  1.110      yamt  */
   1712  1.110      yamt 
   1713  1.110      yamt void
   1714  1.110      yamt uvm_pageunwire(struct vm_page *pg)
   1715  1.110      yamt {
   1716  1.127        ad 	KASSERT(mutex_owned(&uvm_pageqlock));
   1717  1.110      yamt 	pg->wire_count--;
   1718  1.110      yamt 	if (pg->wire_count == 0) {
   1719  1.111      yamt 		uvm_pageactivate(pg);
   1720  1.110      yamt 		uvmexp.wired--;
   1721  1.110      yamt 	}
   1722  1.110      yamt }
   1723  1.110      yamt 
   1724  1.110      yamt /*
   1725  1.110      yamt  * uvm_pagedeactivate: deactivate page
   1726  1.110      yamt  *
   1727  1.110      yamt  * => caller must lock page queues
   1728  1.110      yamt  * => caller must check to make sure page is not wired
   1729  1.110      yamt  * => object that page belongs to must be locked (so we can adjust pg->flags)
   1730  1.110      yamt  * => caller must clear the reference on the page before calling
   1731  1.110      yamt  */
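                          /*
                           * Illustrative sketch (assumed): clear the pmap-level reference bit
                           * before deactivating, so the pagedaemon sees the page as inactive.
                           *
                           *	mutex_enter(&uvm_pageqlock);
                           *	pmap_clear_reference(pg);
                           *	uvm_pagedeactivate(pg);
                           *	mutex_exit(&uvm_pageqlock);
                           */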
   1732  1.110      yamt 
   1733  1.110      yamt void
   1734  1.110      yamt uvm_pagedeactivate(struct vm_page *pg)
   1735  1.110      yamt {
   1736  1.113      yamt 
   1737  1.127        ad 	KASSERT(mutex_owned(&uvm_pageqlock));
   1738  1.113      yamt 	KASSERT(pg->wire_count != 0 || uvmpdpol_pageisqueued_p(pg));
   1739  1.113      yamt 	uvmpdpol_pagedeactivate(pg);
   1740  1.110      yamt }
   1741  1.110      yamt 
   1742  1.110      yamt /*
   1743  1.110      yamt  * uvm_pageactivate: activate page
   1744  1.110      yamt  *
   1745  1.110      yamt  * => caller must lock page queues
   1746  1.110      yamt  */
   1747  1.110      yamt 
   1748  1.110      yamt void
   1749  1.110      yamt uvm_pageactivate(struct vm_page *pg)
   1750  1.110      yamt {
   1751  1.113      yamt 
   1752  1.127        ad 	KASSERT(mutex_owned(&uvm_pageqlock));
   1753  1.113      yamt #if defined(READAHEAD_STATS)
   1754  1.113      yamt 	if ((pg->pqflags & PQ_READAHEAD) != 0) {
   1755  1.113      yamt 		uvm_ra_hit.ev_count++;
   1756  1.113      yamt 		pg->pqflags &= ~PQ_READAHEAD;
   1757  1.113      yamt 	}
   1758  1.113      yamt #endif /* defined(READAHEAD_STATS) */
   1759  1.113      yamt 	if (pg->wire_count != 0) {
   1760  1.113      yamt 		return;
   1761  1.110      yamt 	}
   1762  1.113      yamt 	uvmpdpol_pageactivate(pg);
   1763  1.110      yamt }
   1764  1.110      yamt 
   1765  1.110      yamt /*
   1766  1.110      yamt  * uvm_pagedequeue: remove a page from any paging queue
   1767  1.110      yamt  */
   1768  1.110      yamt 
   1769  1.110      yamt void
   1770  1.110      yamt uvm_pagedequeue(struct vm_page *pg)
   1771  1.110      yamt {
   1772  1.113      yamt 
   1773  1.113      yamt 	if (uvmpdpol_pageisqueued_p(pg)) {
   1774  1.127        ad 		KASSERT(mutex_owned(&uvm_pageqlock));
   1775  1.110      yamt 	}
   1776  1.123        ad 
   1777  1.113      yamt 	uvmpdpol_pagedequeue(pg);
   1778  1.113      yamt }
   1779  1.113      yamt 
   1780  1.113      yamt /*
   1781  1.113      yamt  * uvm_pageenqueue: add a page to a paging queue without activating.
    1782  1.113      yamt  * used where a page is not really demanded (yet), e.g. read-ahead.
   1783  1.113      yamt  */
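                          /*
                           * Illustrative sketch (assumed): read-ahead code can make a
                           * speculative page known to the pagedaemon without activating it.
                           *
                           *	mutex_enter(&uvm_pageqlock);
                           *	uvm_pageenqueue(pg);
                           *	mutex_exit(&uvm_pageqlock);
                           */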
   1784  1.113      yamt 
   1785  1.113      yamt void
   1786  1.113      yamt uvm_pageenqueue(struct vm_page *pg)
   1787  1.113      yamt {
   1788  1.113      yamt 
   1789  1.127        ad 	KASSERT(mutex_owned(&uvm_pageqlock));
   1790  1.113      yamt 	if (pg->wire_count != 0) {
   1791  1.113      yamt 		return;
   1792  1.113      yamt 	}
   1793  1.113      yamt 	uvmpdpol_pageenqueue(pg);
   1794  1.110      yamt }
   1795  1.110      yamt 
   1796  1.110      yamt /*
   1797  1.110      yamt  * uvm_pagezero: zero fill a page
   1798  1.110      yamt  *
   1799  1.110      yamt  * => if page is part of an object then the object should be locked
   1800  1.110      yamt  *	to protect pg->flags.
   1801  1.110      yamt  */
   1802  1.110      yamt 
   1803  1.110      yamt void
   1804  1.110      yamt uvm_pagezero(struct vm_page *pg)
   1805  1.110      yamt {
   1806  1.110      yamt 	pg->flags &= ~PG_CLEAN;
   1807  1.110      yamt 	pmap_zero_page(VM_PAGE_TO_PHYS(pg));
   1808  1.110      yamt }
   1809  1.110      yamt 
   1810  1.110      yamt /*
   1811  1.110      yamt  * uvm_pagecopy: copy a page
   1812  1.110      yamt  *
   1813  1.110      yamt  * => if page is part of an object then the object should be locked
   1814  1.110      yamt  *	to protect pg->flags.
   1815  1.110      yamt  */
   1816  1.110      yamt 
   1817  1.110      yamt void
   1818  1.110      yamt uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
   1819  1.110      yamt {
   1820  1.110      yamt 
   1821  1.110      yamt 	dst->flags &= ~PG_CLEAN;
   1822  1.110      yamt 	pmap_copy_page(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
   1823  1.110      yamt }
   1824  1.110      yamt 
   1825  1.110      yamt /*
   1826  1.110      yamt  * uvm_page_lookup_freelist: look up the free list for the specified page
   1827  1.110      yamt  */
   1828  1.110      yamt 
   1829  1.110      yamt int
   1830  1.110      yamt uvm_page_lookup_freelist(struct vm_page *pg)
   1831  1.110      yamt {
   1832  1.110      yamt 	int lcv;
   1833  1.110      yamt 
   1834  1.110      yamt 	lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
   1835  1.110      yamt 	KASSERT(lcv != -1);
   1836  1.110      yamt 	return (vm_physmem[lcv].free_list);
   1837  1.110      yamt }
   1838