      1 /*	$NetBSD: uvm_page.c,v 1.162 2010/11/12 03:21:04 uebayasi Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2010 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  *
     16  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     17  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     18  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     19  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     26  * POSSIBILITY OF SUCH DAMAGE.
     27  */
     28 
     29 /*
     30  * Copyright (c) 1997 Charles D. Cranor and Washington University.
     31  * Copyright (c) 1991, 1993, The Regents of the University of California.
     32  *
     33  * All rights reserved.
     34  *
     35  * This code is derived from software contributed to Berkeley by
     36  * The Mach Operating System project at Carnegie-Mellon University.
     37  *
     38  * Redistribution and use in source and binary forms, with or without
     39  * modification, are permitted provided that the following conditions
     40  * are met:
     41  * 1. Redistributions of source code must retain the above copyright
     42  *    notice, this list of conditions and the following disclaimer.
     43  * 2. Redistributions in binary form must reproduce the above copyright
     44  *    notice, this list of conditions and the following disclaimer in the
     45  *    documentation and/or other materials provided with the distribution.
     46  * 3. All advertising materials mentioning features or use of this software
     47  *    must display the following acknowledgement:
     48  *	This product includes software developed by Charles D. Cranor,
     49  *      Washington University, the University of California, Berkeley and
     50  *      its contributors.
     51  * 4. Neither the name of the University nor the names of its contributors
     52  *    may be used to endorse or promote products derived from this software
     53  *    without specific prior written permission.
     54  *
     55  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     56  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     57  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     58  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     59  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     60  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     61  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     62  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     63  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     64  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     65  * SUCH DAMAGE.
     66  *
     67  *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
     68  * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
     69  *
     70  *
     71  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
     72  * All rights reserved.
     73  *
     74  * Permission to use, copy, modify and distribute this software and
     75  * its documentation is hereby granted, provided that both the copyright
     76  * notice and this permission notice appear in all copies of the
     77  * software, derivative works or modified versions, and any portions
     78  * thereof, and that both notices appear in supporting documentation.
     79  *
     80  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     81  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     82  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     83  *
     84  * Carnegie Mellon requests users of this software to return to
     85  *
      86  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     87  *  School of Computer Science
     88  *  Carnegie Mellon University
     89  *  Pittsburgh PA 15213-3890
     90  *
     91  * any improvements or extensions that they make and grant Carnegie the
     92  * rights to redistribute these changes.
     93  */
     94 
     95 /*
     96  * uvm_page.c: page ops.
     97  */
     98 
     99 #include <sys/cdefs.h>
    100 __KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.162 2010/11/12 03:21:04 uebayasi Exp $");
    101 
    102 #include "opt_ddb.h"
    103 #include "opt_uvmhist.h"
    104 #include "opt_readahead.h"
    105 
    106 #include <sys/param.h>
    107 #include <sys/systm.h>
    108 #include <sys/malloc.h>
    109 #include <sys/sched.h>
    110 #include <sys/kernel.h>
    111 #include <sys/vnode.h>
    112 #include <sys/proc.h>
    113 #include <sys/atomic.h>
    114 #include <sys/cpu.h>
    115 
    116 #include <uvm/uvm.h>
    117 #include <uvm/uvm_ddb.h>
    118 #include <uvm/uvm_pdpolicy.h>
    119 
    120 /*
    121  * global vars... XXXCDC: move to uvm. structure.
    122  */
    123 
    124 /*
    125  * physical memory config is stored in vm_physmem.
    126  */
    127 
    128 struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
    129 int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */
    130 #define	vm_nphysmem	vm_nphysseg
    131 
    132 /*
    133  * Some supported CPUs in a given architecture don't support all
    134  * of the things necessary to do idle page zero'ing efficiently.
    135  * We therefore provide a way to enable it from machdep code here.
    136  */
    137 bool vm_page_zero_enable = false;
    138 
    139 /*
    140  * number of pages per-CPU to reserve for the kernel.
    141  */
    142 int vm_page_reserve_kernel = 5;
    143 
    144 /*
     145  * physical memory size, in pages.
    146  */
    147 int physmem;
    148 
    149 /*
    150  * local variables
    151  */
    152 
    153 /*
     154  * these variables record the kernel virtual address range returned
     155  * by pmap_virtual_space().  uvm_pageboot_alloc() carves its bootstrap
     156  * allocations out of this range; uvm_page_init() passes the rest up.
    157  */
    158 
    159 static vaddr_t      virtual_space_start;
    160 static vaddr_t      virtual_space_end;
    161 
    162 /*
    163  * we allocate an initial number of page colors in uvm_page_init(),
    164  * and remember them.  We may re-color pages as cache sizes are
    165  * discovered during the autoconfiguration phase.  But we can never
    166  * free the initial set of buckets, since they are allocated using
    167  * uvm_pageboot_alloc().
    168  */
    169 
    170 static bool have_recolored_pages /* = false */;
    171 
    172 MALLOC_DEFINE(M_VMPAGE, "VM page", "VM page");
    173 
    174 #ifdef DEBUG
    175 vaddr_t uvm_zerocheckkva;
    176 #endif /* DEBUG */
    177 
    178 /*
    179  * local prototypes
    180  */
    181 
    182 static void uvm_pageinsert(struct uvm_object *, struct vm_page *);
    183 static void uvm_pageremove(struct uvm_object *, struct vm_page *);
    184 
    185 /*
    186  * per-object tree of pages
    187  */
    188 
    189 static signed int
    190 uvm_page_compare_nodes(void *ctx, const void *n1, const void *n2)
    191 {
    192 	const struct vm_page *pg1 = n1;
    193 	const struct vm_page *pg2 = n2;
    194 	const voff_t a = pg1->offset;
    195 	const voff_t b = pg2->offset;
    196 
    197 	if (a < b)
    198 		return -1;
    199 	if (a > b)
    200 		return 1;
    201 	return 0;
    202 }
    203 
    204 static signed int
    205 uvm_page_compare_key(void *ctx, const void *n, const void *key)
    206 {
    207 	const struct vm_page *pg = n;
    208 	const voff_t a = pg->offset;
    209 	const voff_t b = *(const voff_t *)key;
    210 
    211 	if (a < b)
    212 		return -1;
    213 	if (a > b)
    214 		return 1;
    215 	return 0;
    216 }
    217 
    218 const rb_tree_ops_t uvm_page_tree_ops = {
    219 	.rbto_compare_nodes = uvm_page_compare_nodes,
    220 	.rbto_compare_key = uvm_page_compare_key,
    221 	.rbto_node_offset = offsetof(struct vm_page, rb_node),
    222 	.rbto_context = NULL
    223 };
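
/*
 * a minimal lookup sketch using the ops above (rb_tree_find_node is
 * the standard <sys/rbtree.h> key lookup; this exact call does not
 * appear in this file):
 *
 *	voff_t off = ...;
 *	struct vm_page *pg = rb_tree_find_node(&uobj->rb_tree, &off);
 *
 * rbto_compare_key drives key-based lookups like this one, while
 * rbto_compare_nodes orders whole pages on insertion.
 */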
    224 
    225 /*
    226  * inline functions
    227  */
    228 
    229 /*
    230  * uvm_pageinsert: insert a page in the object.
    231  *
    232  * => caller must lock object
    233  * => caller must lock page queues
     234  * => caller should have already set pg's object and offset pointers
    235  *    and bumped the version counter
    236  */
    237 
    238 static inline void
    239 uvm_pageinsert_list(struct uvm_object *uobj, struct vm_page *pg,
    240     struct vm_page *where)
    241 {
    242 
    243 	KASSERT(uobj == pg->uobject);
    244 	KASSERT(mutex_owned(&uobj->vmobjlock));
    245 	KASSERT((pg->flags & PG_TABLED) == 0);
    246 	KASSERT(where == NULL || (where->flags & PG_TABLED));
    247 	KASSERT(where == NULL || (where->uobject == uobj));
    248 
    249 	if (UVM_OBJ_IS_VNODE(uobj)) {
    250 		if (uobj->uo_npages == 0) {
    251 			struct vnode *vp = (struct vnode *)uobj;
    252 
    253 			vholdl(vp);
    254 		}
    255 		if (UVM_OBJ_IS_VTEXT(uobj)) {
    256 			atomic_inc_uint(&uvmexp.execpages);
    257 		} else {
    258 			atomic_inc_uint(&uvmexp.filepages);
    259 		}
    260 	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
    261 		atomic_inc_uint(&uvmexp.anonpages);
    262 	}
    263 
    264 	if (where)
    265 		TAILQ_INSERT_AFTER(&uobj->memq, where, pg, listq.queue);
    266 	else
    267 		TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
    268 	pg->flags |= PG_TABLED;
    269 	uobj->uo_npages++;
    270 }
    271 
    272 
    273 static inline void
    274 uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
    275 {
    276 	struct vm_page *ret;
    277 
    278 	KASSERT(uobj == pg->uobject);
    279 	ret = rb_tree_insert_node(&uobj->rb_tree, pg);
    280 	KASSERT(ret == pg);
    281 }
    282 
    283 static inline void
    284 uvm_pageinsert(struct uvm_object *uobj, struct vm_page *pg)
    285 {
    286 
    287 	KDASSERT(uobj != NULL);
    288 	uvm_pageinsert_tree(uobj, pg);
    289 	uvm_pageinsert_list(uobj, pg, NULL);
    290 }
    291 
    292 /*
     293  * uvm_pageremove: remove page from object.
    294  *
    295  * => caller must lock object
    296  * => caller must lock page queues
    297  */
    298 
    299 static inline void
    300 uvm_pageremove_list(struct uvm_object *uobj, struct vm_page *pg)
    301 {
    302 
    303 	KASSERT(uobj == pg->uobject);
    304 	KASSERT(mutex_owned(&uobj->vmobjlock));
    305 	KASSERT(pg->flags & PG_TABLED);
    306 
    307 	if (UVM_OBJ_IS_VNODE(uobj)) {
    308 		if (uobj->uo_npages == 1) {
    309 			struct vnode *vp = (struct vnode *)uobj;
    310 
    311 			holdrelel(vp);
    312 		}
    313 		if (UVM_OBJ_IS_VTEXT(uobj)) {
    314 			atomic_dec_uint(&uvmexp.execpages);
    315 		} else {
    316 			atomic_dec_uint(&uvmexp.filepages);
    317 		}
    318 	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
    319 		atomic_dec_uint(&uvmexp.anonpages);
    320 	}
    321 
    322 	/* object should be locked */
    323 	uobj->uo_npages--;
    324 	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
    325 	pg->flags &= ~PG_TABLED;
    326 	pg->uobject = NULL;
    327 }
    328 
    329 static inline void
    330 uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg)
    331 {
    332 
    333 	KASSERT(uobj == pg->uobject);
    334 	rb_tree_remove_node(&uobj->rb_tree, pg);
    335 }
    336 
    337 static inline void
    338 uvm_pageremove(struct uvm_object *uobj, struct vm_page *pg)
    339 {
    340 
    341 	KDASSERT(uobj != NULL);
    342 	uvm_pageremove_tree(uobj, pg);
    343 	uvm_pageremove_list(uobj, pg);
    344 }
    345 
    346 static void
    347 uvm_page_init_buckets(struct pgfreelist *pgfl)
    348 {
    349 	int color, i;
    350 
    351 	for (color = 0; color < uvmexp.ncolors; color++) {
    352 		for (i = 0; i < PGFL_NQUEUES; i++) {
    353 			LIST_INIT(&pgfl->pgfl_buckets[color].pgfl_queues[i]);
    354 		}
    355 	}
    356 }
    357 
    358 /*
    359  * uvm_page_init: init the page system.   called from uvm_init().
    360  *
    361  * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
    362  */
    363 
    364 void
    365 uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
    366 {
    367 	static struct uvm_cpu boot_cpu;
    368 	psize_t freepages, pagecount, bucketcount, n;
    369 	struct pgflbucket *bucketarray, *cpuarray;
    370 	struct vm_physseg *seg;
    371 	struct vm_page *pagearray;
    372 	int lcv;
    373 	u_int i;
    374 	paddr_t paddr;
    375 
    376 	KASSERT(ncpu <= 1);
    377 	CTASSERT(sizeof(pagearray->offset) >= sizeof(struct uvm_cpu *));
    378 
    379 	/*
    380 	 * init the page queues and page queue locks, except the free
    381 	 * list; we allocate that later (with the initial vm_page
    382 	 * structures).
    383 	 */
    384 
    385 	uvm.cpus[0] = &boot_cpu;
    386 	curcpu()->ci_data.cpu_uvm = &boot_cpu;
    387 	uvm_reclaim_init();
    388 	uvmpdpol_init();
    389 	mutex_init(&uvm_pageqlock, MUTEX_DRIVER, IPL_NONE);
    390 	mutex_init(&uvm_fpageqlock, MUTEX_DRIVER, IPL_VM);
    391 
    392 	/*
    393 	 * allocate vm_page structures.
    394 	 */
    395 
    396 	/*
    397 	 * sanity check:
    398 	 * before calling this function the MD code is expected to register
    399 	 * some free RAM with the uvm_page_physload() function.   our job
    400 	 * now is to allocate vm_page structures for this memory.
    401 	 */
    402 
    403 	if (vm_nphysmem == 0)
     404 		panic("uvm_page_init: no memory pre-allocated");
    405 
    406 	/*
    407 	 * first calculate the number of free pages...
    408 	 *
    409 	 * note that we use start/end rather than avail_start/avail_end.
    410 	 * this allows us to allocate extra vm_page structures in case we
    411 	 * want to return some memory to the pool after booting.
    412 	 */
    413 
    414 	freepages = 0;
    415 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
    416 		seg = VM_PHYSMEM_PTR(lcv);
    417 		freepages += (seg->end - seg->start);
    418 	}
    419 
    420 	/*
    421 	 * Let MD code initialize the number of colors, or default
    422 	 * to 1 color if MD code doesn't care.
    423 	 */
    424 	if (uvmexp.ncolors == 0)
    425 		uvmexp.ncolors = 1;
    426 	uvmexp.colormask = uvmexp.ncolors - 1;
    427 
    428 	/*
    429 	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
    430 	 * use.   for each page of memory we use we need a vm_page structure.
     431 	 * thus, the total number of pages we can use is the total size of
     432 	 * the memory divided by (PAGE_SIZE + sizeof(struct vm_page)).
     433 	 * we add one to freepages as a fudge factor to avoid
    434 	 * truncation errors (since we can only allocate in terms of whole
    435 	 * pages).
    436 	 */
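	/*
	 * to get a feel for the formula, take illustrative (not actual)
	 * sizes: with PAGE_SIZE = 4096 and sizeof(struct vm_page) = 96,
	 * pagecount is roughly freepages * 4096 / 4192, i.e. about 97.7%
	 * of the raw page count, the rest going to vm_page structures.
	 */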
    437 
    438 	bucketcount = uvmexp.ncolors * VM_NFREELIST;
    439 	pagecount = ((freepages + 1) << PAGE_SHIFT) /
    440 	    (PAGE_SIZE + sizeof(struct vm_page));
    441 
    442 	bucketarray = (void *)uvm_pageboot_alloc((bucketcount *
    443 	    sizeof(struct pgflbucket) * 2) + (pagecount *
    444 	    sizeof(struct vm_page)));
    445 	cpuarray = bucketarray + bucketcount;
    446 	pagearray = (struct vm_page *)(bucketarray + bucketcount * 2);
    447 
    448 	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
    449 		uvm.page_free[lcv].pgfl_buckets =
    450 		    (bucketarray + (lcv * uvmexp.ncolors));
    451 		uvm_page_init_buckets(&uvm.page_free[lcv]);
    452 		uvm.cpus[0]->page_free[lcv].pgfl_buckets =
    453 		    (cpuarray + (lcv * uvmexp.ncolors));
    454 		uvm_page_init_buckets(&uvm.cpus[0]->page_free[lcv]);
    455 	}
    456 	memset(pagearray, 0, pagecount * sizeof(struct vm_page));
    457 
    458 	/*
    459 	 * init the vm_page structures and put them in the correct place.
    460 	 */
    461 
    462 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
    463 		seg = VM_PHYSMEM_PTR(lcv);
    464 		n = seg->end - seg->start;
    465 
    466 		/* set up page array pointers */
    467 		seg->pgs = pagearray;
    468 		pagearray += n;
    469 		pagecount -= n;
    470 		seg->lastpg = seg->pgs + n;
    471 
    472 		/* init and free vm_pages (we've already zeroed them) */
    473 		paddr = ctob(seg->start);
    474 		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
    475 			seg->pgs[i].phys_addr = paddr;
    476 #ifdef __HAVE_VM_PAGE_MD
    477 			VM_MDPAGE_INIT(&seg->pgs[i]);
    478 #endif
    479 			if (atop(paddr) >= seg->avail_start &&
     480 			    atop(paddr) < seg->avail_end) {
    481 				uvmexp.npages++;
    482 				/* add page to free pool */
    483 				uvm_pagefree(&seg->pgs[i]);
    484 			}
    485 		}
    486 	}
    487 
    488 	/*
    489 	 * pass up the values of virtual_space_start and
    490 	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
    491 	 * layers of the VM.
    492 	 */
    493 
    494 	*kvm_startp = round_page(virtual_space_start);
    495 	*kvm_endp = trunc_page(virtual_space_end);
    496 #ifdef DEBUG
    497 	/*
    498 	 * steal kva for uvm_pagezerocheck().
    499 	 */
    500 	uvm_zerocheckkva = *kvm_startp;
    501 	*kvm_startp += PAGE_SIZE;
    502 #endif /* DEBUG */
    503 
    504 	/*
    505 	 * init various thresholds.
    506 	 */
    507 
    508 	uvmexp.reserve_pagedaemon = 1;
    509 	uvmexp.reserve_kernel = vm_page_reserve_kernel;
    510 
    511 	/*
    512 	 * determine if we should zero pages in the idle loop.
    513 	 */
    514 
    515 	uvm.cpus[0]->page_idle_zero = vm_page_zero_enable;
    516 
    517 	/*
    518 	 * done!
    519 	 */
    520 
    521 	uvm.page_init_done = true;
    522 }
    523 
    524 /*
    525  * uvm_setpagesize: set the page size
    526  *
    527  * => sets page_shift and page_mask from uvmexp.pagesize.
    528  */
    529 
    530 void
    531 uvm_setpagesize(void)
    532 {
    533 
    534 	/*
    535 	 * If uvmexp.pagesize is 0 at this point, we expect PAGE_SIZE
     536 	 * to be a compile-time constant (i.e. a non-zero value).
    537 	 */
    538 	if (uvmexp.pagesize == 0) {
    539 		if (PAGE_SIZE == 0)
    540 			panic("uvm_setpagesize: uvmexp.pagesize not set");
    541 		uvmexp.pagesize = PAGE_SIZE;
    542 	}
    543 	uvmexp.pagemask = uvmexp.pagesize - 1;
    544 	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
    545 		panic("uvm_setpagesize: page size not a power of two");
    546 	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
    547 		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
    548 			break;
    549 }
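
/*
 * for a power-of-two page size the loop above simply computes
 * ffs(uvmexp.pagesize) - 1; e.g. a page size of 4096 yields
 * pagemask 0xfff and pageshift 12.
 */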
    550 
    551 /*
    552  * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
    553  */
    554 
    555 vaddr_t
    556 uvm_pageboot_alloc(vsize_t size)
    557 {
    558 	static bool initialized = false;
    559 	vaddr_t addr;
    560 #if !defined(PMAP_STEAL_MEMORY)
    561 	vaddr_t vaddr;
    562 	paddr_t paddr;
    563 #endif
    564 
    565 	/*
    566 	 * on first call to this function, initialize ourselves.
    567 	 */
    568 	if (initialized == false) {
    569 		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
    570 
    571 		/* round it the way we like it */
    572 		virtual_space_start = round_page(virtual_space_start);
    573 		virtual_space_end = trunc_page(virtual_space_end);
    574 
    575 		initialized = true;
    576 	}
    577 
    578 	/* round to page size */
    579 	size = round_page(size);
    580 
    581 #if defined(PMAP_STEAL_MEMORY)
    582 
    583 	/*
    584 	 * defer bootstrap allocation to MD code (it may want to allocate
    585 	 * from a direct-mapped segment).  pmap_steal_memory should adjust
    586 	 * virtual_space_start/virtual_space_end if necessary.
    587 	 */
    588 
    589 	addr = pmap_steal_memory(size, &virtual_space_start,
    590 	    &virtual_space_end);
    591 
    592 	return(addr);
    593 
    594 #else /* !PMAP_STEAL_MEMORY */
    595 
    596 	/*
    597 	 * allocate virtual memory for this request
    598 	 */
    599 	if (virtual_space_start == virtual_space_end ||
    600 	    (virtual_space_end - virtual_space_start) < size)
    601 		panic("uvm_pageboot_alloc: out of virtual space");
    602 
    603 	addr = virtual_space_start;
    604 
    605 #ifdef PMAP_GROWKERNEL
    606 	/*
    607 	 * If the kernel pmap can't map the requested space,
    608 	 * then allocate more resources for it.
    609 	 */
    610 	if (uvm_maxkaddr < (addr + size)) {
    611 		uvm_maxkaddr = pmap_growkernel(addr + size);
    612 		if (uvm_maxkaddr < (addr + size))
    613 			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
    614 	}
    615 #endif
    616 
    617 	virtual_space_start += size;
    618 
    619 	/*
    620 	 * allocate and mapin physical pages to back new virtual pages
    621 	 */
    622 
    623 	for (vaddr = round_page(addr) ; vaddr < addr + size ;
    624 	    vaddr += PAGE_SIZE) {
    625 
    626 		if (!uvm_page_physget(&paddr))
    627 			panic("uvm_pageboot_alloc: out of memory");
    628 
    629 		/*
    630 		 * Note this memory is no longer managed, so using
    631 		 * pmap_kenter is safe.
    632 		 */
    633 		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE, 0);
    634 	}
    635 	pmap_update(pmap_kernel());
    636 	return(addr);
    637 #endif	/* PMAP_STEAL_MEMORY */
    638 }
    639 
    640 #if !defined(PMAP_STEAL_MEMORY)
    641 /*
    642  * uvm_page_physget: "steal" one page from the vm_physmem structure.
    643  *
    644  * => attempt to allocate it off the end of a segment in which the "avail"
    645  *    values match the start/end values.   if we can't do that, then we
    646  *    will advance both values (making them equal, and removing some
    647  *    vm_page structures from the non-avail area).
    648  * => return false if out of memory.
    649  */
    650 
    651 /* subroutine: try to allocate from memory chunks on the specified freelist */
    652 static bool uvm_page_physget_freelist(paddr_t *, int);
    653 
    654 static bool
    655 uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
    656 {
    657 	struct vm_physseg *seg;
    658 	int lcv, x;
    659 
    660 	/* pass 1: try allocating from a matching end */
    661 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
    662 	for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
    663 #else
    664 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
    665 #endif
    666 	{
    667 		seg = VM_PHYSMEM_PTR(lcv);
    668 
    669 		if (uvm.page_init_done == true)
    670 			panic("uvm_page_physget: called _after_ bootstrap");
    671 
    672 		if (seg->free_list != freelist)
    673 			continue;
    674 
    675 		/* try from front */
    676 		if (seg->avail_start == seg->start &&
    677 		    seg->avail_start < seg->avail_end) {
    678 			*paddrp = ctob(seg->avail_start);
    679 			seg->avail_start++;
    680 			seg->start++;
    681 			/* nothing left?   nuke it */
    682 			if (seg->avail_start == seg->end) {
    683 				if (vm_nphysmem == 1)
    684 				    panic("uvm_page_physget: out of memory!");
    685 				vm_nphysmem--;
    686 				for (x = lcv ; x < vm_nphysmem ; x++)
    687 					/* structure copy */
    688 					VM_PHYSMEM_PTR_SWAP(x, x + 1);
    689 			}
    690 			return (true);
    691 		}
    692 
    693 		/* try from rear */
    694 		if (seg->avail_end == seg->end &&
    695 		    seg->avail_start < seg->avail_end) {
    696 			*paddrp = ctob(seg->avail_end - 1);
    697 			seg->avail_end--;
    698 			seg->end--;
    699 			/* nothing left?   nuke it */
    700 			if (seg->avail_end == seg->start) {
    701 				if (vm_nphysmem == 1)
    702 				    panic("uvm_page_physget: out of memory!");
    703 				vm_nphysmem--;
    704 				for (x = lcv ; x < vm_nphysmem ; x++)
    705 					/* structure copy */
    706 					VM_PHYSMEM_PTR_SWAP(x, x + 1);
    707 			}
    708 			return (true);
    709 		}
    710 	}
    711 
     712 	/* pass 2: forget about matching ends, just allocate something */
    713 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
    714 	for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
    715 #else
    716 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
    717 #endif
    718 	{
    719 		seg = VM_PHYSMEM_PTR(lcv);
    720 
    721 		/* any room in this bank? */
    722 		if (seg->avail_start >= seg->avail_end)
    723 			continue;  /* nope */
    724 
    725 		*paddrp = ctob(seg->avail_start);
    726 		seg->avail_start++;
    727 		/* truncate! */
    728 		seg->start = seg->avail_start;
    729 
    730 		/* nothing left?   nuke it */
    731 		if (seg->avail_start == seg->end) {
    732 			if (vm_nphysmem == 1)
    733 				panic("uvm_page_physget: out of memory!");
    734 			vm_nphysmem--;
    735 			for (x = lcv ; x < vm_nphysmem ; x++)
    736 				/* structure copy */
    737 				VM_PHYSMEM_PTR_SWAP(x, x + 1);
    738 		}
    739 		return (true);
    740 	}
    741 
    742 	return (false);        /* whoops! */
    743 }
    744 
    745 bool
    746 uvm_page_physget(paddr_t *paddrp)
    747 {
    748 	int i;
    749 
    750 	/* try in the order of freelist preference */
    751 	for (i = 0; i < VM_NFREELIST; i++)
    752 		if (uvm_page_physget_freelist(paddrp, i) == true)
    753 			return (true);
    754 	return (false);
    755 }
    756 #endif /* PMAP_STEAL_MEMORY */
    757 
    758 /*
    759  * uvm_page_physload: load physical memory into VM system
    760  *
     761  * => all args are page frame numbers (PFNs)
    762  * => all pages in start/end get vm_page structures
    763  * => areas marked by avail_start/avail_end get added to the free page pool
    764  * => we are limited to VM_PHYSSEG_MAX physical memory segments
    765  */
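/*
 * a sketch of a typical call from MD bootstrap code, converting byte
 * addresses of a RAM bank to frame numbers with atop() (the local
 * variable names here are illustrative only):
 *
 *	uvm_page_physload(atop(seg_start), atop(seg_end),
 *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
 */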
    766 
    767 void
    768 uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    769     paddr_t avail_end, int free_list)
    770 {
    771 	int preload, lcv;
    772 	psize_t npages;
    773 	struct vm_page *pgs;
    774 	struct vm_physseg *ps;
    775 
    776 	if (uvmexp.pagesize == 0)
    777 		panic("uvm_page_physload: page size not set!");
    778 	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
    779 		panic("uvm_page_physload: bad free list %d", free_list);
    780 	if (start >= end)
    781 		panic("uvm_page_physload: start >= end");
    782 
    783 	/*
    784 	 * do we have room?
    785 	 */
    786 
    787 	if (vm_nphysmem == VM_PHYSSEG_MAX) {
    788 		printf("uvm_page_physload: unable to load physical memory "
    789 		    "segment\n");
    790 		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
    791 		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
    792 		printf("\tincrease VM_PHYSSEG_MAX\n");
    793 		return;
    794 	}
    795 
    796 	/*
    797 	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
    798 	 * called yet, so malloc is not available).
    799 	 */
    800 
    801 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
    802 		if (VM_PHYSMEM_PTR(lcv)->pgs)
    803 			break;
    804 	}
    805 	preload = (lcv == vm_nphysmem);
    806 
    807 	/*
     808 	 * if VM is already running, we cannot add memory (not supported yet)
    809 	 */
    810 
    811 	if (!preload) {
    812 		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
    813 	} else {
    814 		pgs = NULL;
    815 		npages = 0;
    816 	}
    817 
    818 	/*
    819 	 * now insert us in the proper place in vm_physmem[]
    820 	 */
    821 
    822 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
    823 	/* random: put it at the end (easy!) */
    824 	ps = VM_PHYSMEM_PTR(vm_nphysmem);
    825 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
    826 	{
    827 		int x;
    828 		/* sort by address for binary search */
    829 		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
    830 			if (start < VM_PHYSMEM_PTR(lcv)->start)
    831 				break;
    832 		ps = VM_PHYSMEM_PTR(lcv);
    833 		/* move back other entries, if necessary ... */
    834 		for (x = vm_nphysmem ; x > lcv ; x--)
    835 			/* structure copy */
    836 			VM_PHYSMEM_PTR_SWAP(x, x - 1);
    837 	}
    838 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
    839 	{
    840 		int x;
    841 		/* sort by largest segment first */
    842 		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
    843 			if ((end - start) >
    844 			    (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
    845 				break;
    846 		ps = VM_PHYSMEM_PTR(lcv);
    847 		/* move back other entries, if necessary ... */
    848 		for (x = vm_nphysmem ; x > lcv ; x--)
    849 			/* structure copy */
    850 			VM_PHYSMEM_PTR_SWAP(x, x - 1);
    851 	}
    852 #else
    853 	panic("uvm_page_physload: unknown physseg strategy selected!");
    854 #endif
    855 
    856 	ps->start = start;
    857 	ps->end = end;
    858 	ps->avail_start = avail_start;
    859 	ps->avail_end = avail_end;
    860 	if (preload) {
    861 		ps->pgs = NULL;
    862 	} else {
    863 		ps->pgs = pgs;
    864 		ps->lastpg = pgs + npages;
    865 	}
    866 	ps->free_list = free_list;
    867 	vm_nphysmem++;
    868 
    869 	if (!preload) {
    870 		uvmpdpol_reinit();
    871 	}
    872 }
    873 
    874 /*
    875  * when VM_PHYSSEG_MAX is 1, we can simplify these functions
    876  */
    877 
    878 #if VM_PHYSSEG_MAX == 1
    879 static inline int vm_physseg_find_contig(struct vm_physseg *, int, paddr_t, int *);
    880 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
    881 static inline int vm_physseg_find_bsearch(struct vm_physseg *, int, paddr_t, int *);
    882 #else
    883 static inline int vm_physseg_find_linear(struct vm_physseg *, int, paddr_t, int *);
    884 #endif
    885 
    886 /*
    887  * vm_physseg_find: find vm_physseg structure that belongs to a PA
    888  */
    889 int
    890 vm_physseg_find(paddr_t pframe, int *offp)
    891 {
    892 
    893 #if VM_PHYSSEG_MAX == 1
    894 	return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
    895 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
    896 	return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
    897 #else
    898 	return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
    899 #endif
    900 }
    901 
    902 #if VM_PHYSSEG_MAX == 1
    903 static inline int
    904 vm_physseg_find_contig(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
    905 {
    906 
    907 	/* 'contig' case */
    908 	if (pframe >= segs[0].start && pframe < segs[0].end) {
    909 		if (offp)
    910 			*offp = pframe - segs[0].start;
    911 		return(0);
    912 	}
    913 	return(-1);
    914 }
    915 
    916 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
    917 
    918 static inline int
    919 vm_physseg_find_bsearch(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
    920 {
    921 	/* binary search for it */
    922 	u_int	start, len, try;
    923 
    924 	/*
    925 	 * if try is too large (thus target is less than try) we reduce
    926 	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
    927 	 *
    928 	 * if the try is too small (thus target is greater than try) then
    929 	 * we set the new start to be (try + 1).   this means we need to
    930 	 * reduce the length to (round(len/2) - 1).
    931 	 *
    932 	 * note "adjust" below which takes advantage of the fact that
    933 	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
    934 	 * for any value of len we may have
    935 	 */
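	/*
	 * worked example: nsegs = 5 with the target in segs[3]:
	 *
	 *	start=0 len=5: try=2, target above -> start=3, len=4
	 *	start=3 len=2: try=4, target below -> len unchanged
	 *	start=3 len=1: try=3, hit -> return 3
	 *
	 * (the "for" loop halves len between rows.)
	 */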
    936 
    937 	for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
    938 		try = start + (len / 2);	/* try in the middle */
    939 
    940 		/* start past our try? */
    941 		if (pframe >= segs[try].start) {
    942 			/* was try correct? */
    943 			if (pframe < segs[try].end) {
    944 				if (offp)
    945 					*offp = pframe - segs[try].start;
    946 				return(try);            /* got it */
    947 			}
    948 			start = try + 1;	/* next time, start here */
    949 			len--;			/* "adjust" */
    950 		} else {
    951 			/*
    952 			 * pframe before try, just reduce length of
    953 			 * region, done in "for" loop
    954 			 */
    955 		}
    956 	}
    957 	return(-1);
    958 }
    959 
    960 #else
    961 
    962 static inline int
    963 vm_physseg_find_linear(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
    964 {
    965 	/* linear search for it */
    966 	int	lcv;
    967 
    968 	for (lcv = 0; lcv < nsegs; lcv++) {
    969 		if (pframe >= segs[lcv].start &&
    970 		    pframe < segs[lcv].end) {
    971 			if (offp)
    972 				*offp = pframe - segs[lcv].start;
    973 			return(lcv);		   /* got it */
    974 		}
    975 	}
    976 	return(-1);
    977 }
    978 #endif
    979 
    980 /*
     981  * uvm_page_recolor: Recolor the pages if the new color count is
     982  * larger than the old one.
    983  */
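
/*
 * the usual caller is MD cache autoconfiguration, which derives the
 * color count from the cache geometry once it is known; a sketch with
 * illustrative variable names:
 *
 *	uvm_page_recolor(l2_size / (l2_assoc * PAGE_SIZE));
 */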
    984 
    985 void
    986 uvm_page_recolor(int newncolors)
    987 {
    988 	struct pgflbucket *bucketarray, *cpuarray, *oldbucketarray;
    989 	struct pgfreelist gpgfl, pgfl;
    990 	struct vm_page *pg;
    991 	vsize_t bucketcount;
    992 	int lcv, color, i, ocolors;
    993 	struct uvm_cpu *ucpu;
    994 
    995 	if (newncolors <= uvmexp.ncolors)
    996 		return;
    997 
    998 	if (uvm.page_init_done == false) {
    999 		uvmexp.ncolors = newncolors;
   1000 		return;
   1001 	}
   1002 
   1003 	bucketcount = newncolors * VM_NFREELIST;
    1004 	bucketarray = malloc(bucketcount * sizeof(struct pgflbucket) * 2,
    1005 	    M_VMPAGE, M_NOWAIT);
    1006 	if (bucketarray == NULL) {
    1007 		printf("WARNING: unable to allocate %ld page color buckets\n",
    1008 		    (long) bucketcount);
    1009 		return;
    1010 	}
    1011 	cpuarray = bucketarray + bucketcount;
   1012 
   1013 	mutex_spin_enter(&uvm_fpageqlock);
   1014 
   1015 	/* Make sure we should still do this. */
   1016 	if (newncolors <= uvmexp.ncolors) {
   1017 		mutex_spin_exit(&uvm_fpageqlock);
   1018 		free(bucketarray, M_VMPAGE);
   1019 		return;
   1020 	}
   1021 
   1022 	oldbucketarray = uvm.page_free[0].pgfl_buckets;
   1023 	ocolors = uvmexp.ncolors;
   1024 
   1025 	uvmexp.ncolors = newncolors;
   1026 	uvmexp.colormask = uvmexp.ncolors - 1;
   1027 
   1028 	ucpu = curcpu()->ci_data.cpu_uvm;
   1029 	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
   1030 		gpgfl.pgfl_buckets = (bucketarray + (lcv * newncolors));
   1031 		pgfl.pgfl_buckets = (cpuarray + (lcv * uvmexp.ncolors));
   1032 		uvm_page_init_buckets(&gpgfl);
   1033 		uvm_page_init_buckets(&pgfl);
   1034 		for (color = 0; color < ocolors; color++) {
   1035 			for (i = 0; i < PGFL_NQUEUES; i++) {
   1036 				while ((pg = LIST_FIRST(&uvm.page_free[
   1037 				    lcv].pgfl_buckets[color].pgfl_queues[i]))
   1038 				    != NULL) {
   1039 					LIST_REMOVE(pg, pageq.list); /* global */
   1040 					LIST_REMOVE(pg, listq.list); /* cpu */
   1041 					LIST_INSERT_HEAD(&gpgfl.pgfl_buckets[
   1042 					    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
   1043 					    i], pg, pageq.list);
   1044 					LIST_INSERT_HEAD(&pgfl.pgfl_buckets[
   1045 					    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
   1046 					    i], pg, listq.list);
   1047 				}
   1048 			}
   1049 		}
   1050 		uvm.page_free[lcv].pgfl_buckets = gpgfl.pgfl_buckets;
   1051 		ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
   1052 	}
   1053 
   1054 	if (have_recolored_pages) {
   1055 		mutex_spin_exit(&uvm_fpageqlock);
   1056 		free(oldbucketarray, M_VMPAGE);
   1057 		return;
   1058 	}
   1059 
   1060 	have_recolored_pages = true;
   1061 	mutex_spin_exit(&uvm_fpageqlock);
   1062 }
   1063 
   1064 /*
   1065  * uvm_cpu_attach: initialize per-CPU data structures.
   1066  */
   1067 
   1068 void
   1069 uvm_cpu_attach(struct cpu_info *ci)
   1070 {
   1071 	struct pgflbucket *bucketarray;
   1072 	struct pgfreelist pgfl;
   1073 	struct uvm_cpu *ucpu;
   1074 	vsize_t bucketcount;
   1075 	int lcv;
   1076 
   1077 	if (CPU_IS_PRIMARY(ci)) {
   1078 		/* Already done in uvm_page_init(). */
   1079 		return;
   1080 	}
   1081 
   1082 	/* Add more reserve pages for this CPU. */
   1083 	uvmexp.reserve_kernel += vm_page_reserve_kernel;
   1084 
   1085 	/* Configure this CPU's free lists. */
   1086 	bucketcount = uvmexp.ncolors * VM_NFREELIST;
   1087 	bucketarray = malloc(bucketcount * sizeof(struct pgflbucket),
   1088 	    M_VMPAGE, M_WAITOK);
   1089 	ucpu = kmem_zalloc(sizeof(*ucpu), KM_SLEEP);
   1090 	uvm.cpus[cpu_index(ci)] = ucpu;
   1091 	ci->ci_data.cpu_uvm = ucpu;
   1092 	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
   1093 		pgfl.pgfl_buckets = (bucketarray + (lcv * uvmexp.ncolors));
   1094 		uvm_page_init_buckets(&pgfl);
   1095 		ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
   1096 	}
   1097 }
   1098 
   1099 /*
   1100  * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat
   1101  */
   1102 
   1103 static struct vm_page *
   1104 uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int flist, int try1, int try2,
   1105     int *trycolorp)
   1106 {
   1107 	struct pgflist *freeq;
   1108 	struct vm_page *pg;
   1109 	int color, trycolor = *trycolorp;
   1110 	struct pgfreelist *gpgfl, *pgfl;
   1111 
   1112 	KASSERT(mutex_owned(&uvm_fpageqlock));
   1113 
   1114 	color = trycolor;
   1115 	pgfl = &ucpu->page_free[flist];
   1116 	gpgfl = &uvm.page_free[flist];
   1117 	do {
   1118 		/* cpu, try1 */
   1119 		if ((pg = LIST_FIRST((freeq =
   1120 		    &pgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) {
   1121 			VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
   1122 		    	uvmexp.cpuhit++;
   1123 			goto gotit;
   1124 		}
   1125 		/* global, try1 */
   1126 		if ((pg = LIST_FIRST((freeq =
   1127 		    &gpgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) {
   1128 			VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
   1129 		    	uvmexp.cpumiss++;
   1130 			goto gotit;
   1131 		}
   1132 		/* cpu, try2 */
   1133 		if ((pg = LIST_FIRST((freeq =
   1134 		    &pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) {
   1135 			VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
   1136 		    	uvmexp.cpuhit++;
   1137 			goto gotit;
   1138 		}
   1139 		/* global, try2 */
   1140 		if ((pg = LIST_FIRST((freeq =
   1141 		    &gpgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) {
   1142 			VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
   1143 		    	uvmexp.cpumiss++;
   1144 			goto gotit;
   1145 		}
   1146 		color = (color + 1) & uvmexp.colormask;
   1147 	} while (color != trycolor);
   1148 
   1149 	return (NULL);
   1150 
   1151  gotit:
   1152 	LIST_REMOVE(pg, pageq.list);	/* global list */
   1153 	LIST_REMOVE(pg, listq.list);	/* per-cpu list */
   1154 	uvmexp.free--;
   1155 
   1156 	/* update zero'd page count */
   1157 	if (pg->flags & PG_ZERO)
   1158 		uvmexp.zeropages--;
   1159 
   1160 	if (color == trycolor)
   1161 		uvmexp.colorhit++;
   1162 	else {
   1163 		uvmexp.colormiss++;
   1164 		*trycolorp = color;
   1165 	}
   1166 
   1167 	return (pg);
   1168 }
   1169 
   1170 /*
   1171  * uvm_pagealloc_strat: allocate vm_page from a particular free list.
   1172  *
   1173  * => return null if no pages free
   1174  * => wake up pagedaemon if number of free pages drops below low water mark
   1175  * => if obj != NULL, obj must be locked (to put in obj's tree)
   1176  * => if anon != NULL, anon must be locked (to put in anon)
   1177  * => only one of obj or anon can be non-null
   1178  * => caller must activate/deactivate page if it is not wired.
   1179  * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
   1180  * => policy decision: it is more important to pull a page off of the
   1181  *	appropriate priority free list than it is to get a zero'd or
   1182  *	unknown contents page.  This is because we live with the
   1183  *	consequences of a bad free list decision for the entire
   1184  *	lifetime of the page, e.g. if the page comes from memory that
   1185  *	is slower to access.
   1186  */
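
/*
 * most callers use the uvm_pagealloc() macro from uvm_page.h, which is
 * this function with UVM_PGA_STRAT_NORMAL and free list 0.  a sketch
 * of the common retry pattern for a sleepable, object-locked caller:
 *
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, 0)) == NULL) {
 *		mutex_exit(&uobj->vmobjlock);
 *		uvm_wait("pgalloc");
 *		mutex_enter(&uobj->vmobjlock);
 *	}
 */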
   1187 
   1188 struct vm_page *
   1189 uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
   1190     int flags, int strat, int free_list)
   1191 {
   1192 	int lcv, try1, try2, zeroit = 0, color;
   1193 	struct uvm_cpu *ucpu;
   1194 	struct vm_page *pg;
   1195 	lwp_t *l;
   1196 
   1197 	KASSERT(obj == NULL || anon == NULL);
   1198 	KASSERT(anon == NULL || off == 0);
   1199 	KASSERT(off == trunc_page(off));
   1200 	KASSERT(obj == NULL || mutex_owned(&obj->vmobjlock));
   1201 	KASSERT(anon == NULL || mutex_owned(&anon->an_lock));
   1202 
   1203 	mutex_spin_enter(&uvm_fpageqlock);
   1204 
   1205 	/*
   1206 	 * This implements a global round-robin page coloring
   1207 	 * algorithm.
   1208 	 *
   1209 	 * XXXJRT: What about virtually-indexed caches?
   1210 	 */
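	/*
	 * e.g. with uvmexp.ncolors == 4, each successful allocation
	 * advances the starting color by one (mod the color mask), so
	 * back-to-back allocations begin their search at colors
	 * 0, 1, 2, 3, 0, ... -- see the "gotit" path below.
	 */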
   1211 
   1212 	ucpu = curcpu()->ci_data.cpu_uvm;
   1213 	color = ucpu->page_free_nextcolor;
   1214 
   1215 	/*
    1216 	 * check to see if we need to generate some free pages by waking
    1217 	 * the pagedaemon.
   1218 	 */
   1219 
   1220 	uvm_kick_pdaemon();
   1221 
   1222 	/*
   1223 	 * fail if any of these conditions is true:
   1224 	 * [1]  there really are no free pages, or
   1225 	 * [2]  only kernel "reserved" pages remain and
   1226 	 *        reserved pages have not been requested.
   1227 	 * [3]  only pagedaemon "reserved" pages remain and
   1228 	 *        the requestor isn't the pagedaemon.
   1229 	 * we make kernel reserve pages available if called by a
   1230 	 * kernel thread or a realtime thread.
   1231 	 */
   1232 	l = curlwp;
   1233 	if (__predict_true(l != NULL) && lwp_eprio(l) >= PRI_KTHREAD) {
   1234 		flags |= UVM_PGA_USERESERVE;
   1235 	}
   1236 	if ((uvmexp.free <= uvmexp.reserve_kernel &&
   1237 	    (flags & UVM_PGA_USERESERVE) == 0) ||
   1238 	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
   1239 	     curlwp != uvm.pagedaemon_lwp))
   1240 		goto fail;
   1241 
   1242 #if PGFL_NQUEUES != 2
   1243 #error uvm_pagealloc_strat needs to be updated
   1244 #endif
   1245 
   1246 	/*
   1247 	 * If we want a zero'd page, try the ZEROS queue first, otherwise
   1248 	 * we try the UNKNOWN queue first.
   1249 	 */
   1250 	if (flags & UVM_PGA_ZERO) {
   1251 		try1 = PGFL_ZEROS;
   1252 		try2 = PGFL_UNKNOWN;
   1253 	} else {
   1254 		try1 = PGFL_UNKNOWN;
   1255 		try2 = PGFL_ZEROS;
   1256 	}
   1257 
   1258  again:
   1259 	switch (strat) {
   1260 	case UVM_PGA_STRAT_NORMAL:
   1261 		/* Check freelists: descending priority (ascending id) order */
   1262 		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
   1263 			pg = uvm_pagealloc_pgfl(ucpu, lcv,
   1264 			    try1, try2, &color);
   1265 			if (pg != NULL)
   1266 				goto gotit;
   1267 		}
   1268 
   1269 		/* No pages free! */
   1270 		goto fail;
   1271 
   1272 	case UVM_PGA_STRAT_ONLY:
   1273 	case UVM_PGA_STRAT_FALLBACK:
   1274 		/* Attempt to allocate from the specified free list. */
   1275 		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
   1276 		pg = uvm_pagealloc_pgfl(ucpu, free_list,
   1277 		    try1, try2, &color);
   1278 		if (pg != NULL)
   1279 			goto gotit;
   1280 
   1281 		/* Fall back, if possible. */
   1282 		if (strat == UVM_PGA_STRAT_FALLBACK) {
   1283 			strat = UVM_PGA_STRAT_NORMAL;
   1284 			goto again;
   1285 		}
   1286 
   1287 		/* No pages free! */
   1288 		goto fail;
   1289 
   1290 	default:
   1291 		panic("uvm_pagealloc_strat: bad strat %d", strat);
   1292 		/* NOTREACHED */
   1293 	}
   1294 
   1295  gotit:
   1296 	/*
   1297 	 * We now know which color we actually allocated from; set
   1298 	 * the next color accordingly.
   1299 	 */
   1300 
   1301 	ucpu->page_free_nextcolor = (color + 1) & uvmexp.colormask;
   1302 
   1303 	/*
   1304 	 * update allocation statistics and remember if we have to
   1305 	 * zero the page
   1306 	 */
   1307 
   1308 	if (flags & UVM_PGA_ZERO) {
   1309 		if (pg->flags & PG_ZERO) {
   1310 			uvmexp.pga_zerohit++;
   1311 			zeroit = 0;
   1312 		} else {
   1313 			uvmexp.pga_zeromiss++;
   1314 			zeroit = 1;
   1315 		}
   1316 		if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
   1317 			ucpu->page_idle_zero = vm_page_zero_enable;
   1318 		}
   1319 	}
   1320 	KASSERT(pg->pqflags == PQ_FREE);
   1321 
   1322 	pg->offset = off;
   1323 	pg->uobject = obj;
   1324 	pg->uanon = anon;
   1325 	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
   1326 	if (anon) {
   1327 		anon->an_page = pg;
   1328 		pg->pqflags = PQ_ANON;
   1329 		atomic_inc_uint(&uvmexp.anonpages);
   1330 	} else {
   1331 		if (obj) {
   1332 			uvm_pageinsert(obj, pg);
   1333 		}
   1334 		pg->pqflags = 0;
   1335 	}
   1336 	mutex_spin_exit(&uvm_fpageqlock);
   1337 
   1338 #if defined(UVM_PAGE_TRKOWN)
   1339 	pg->owner_tag = NULL;
   1340 #endif
   1341 	UVM_PAGE_OWN(pg, "new alloc");
   1342 
   1343 	if (flags & UVM_PGA_ZERO) {
   1344 		/*
   1345 		 * A zero'd page is not clean.  If we got a page not already
   1346 		 * zero'd, then we have to zero it ourselves.
   1347 		 */
   1348 		pg->flags &= ~PG_CLEAN;
   1349 		if (zeroit)
   1350 			pmap_zero_page(VM_PAGE_TO_PHYS(pg));
   1351 	}
   1352 
   1353 	return(pg);
   1354 
   1355  fail:
   1356 	mutex_spin_exit(&uvm_fpageqlock);
   1357 	return (NULL);
   1358 }
   1359 
   1360 /*
   1361  * uvm_pagereplace: replace a page with another
   1362  *
   1363  * => object must be locked
   1364  */
   1365 
   1366 void
   1367 uvm_pagereplace(struct vm_page *oldpg, struct vm_page *newpg)
   1368 {
   1369 	struct uvm_object *uobj = oldpg->uobject;
   1370 
   1371 	KASSERT((oldpg->flags & PG_TABLED) != 0);
   1372 	KASSERT(uobj != NULL);
   1373 	KASSERT((newpg->flags & PG_TABLED) == 0);
   1374 	KASSERT(newpg->uobject == NULL);
   1375 	KASSERT(mutex_owned(&uobj->vmobjlock));
   1376 
   1377 	newpg->uobject = uobj;
   1378 	newpg->offset = oldpg->offset;
   1379 
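	/*
	 * ordering matters here: newpg is inserted with oldpg as the
	 * "where" hint, so it takes over oldpg's exact position in the
	 * object's memq before oldpg is removed from the list.
	 */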
   1380 	uvm_pageremove_tree(uobj, oldpg);
   1381 	uvm_pageinsert_tree(uobj, newpg);
   1382 	uvm_pageinsert_list(uobj, newpg, oldpg);
   1383 	uvm_pageremove_list(uobj, oldpg);
   1384 }
   1385 
   1386 /*
   1387  * uvm_pagerealloc: reallocate a page from one object to another
   1388  *
   1389  * => both objects must be locked
   1390  */
   1391 
   1392 void
   1393 uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
   1394 {
   1395 	/*
   1396 	 * remove it from the old object
   1397 	 */
   1398 
   1399 	if (pg->uobject) {
   1400 		uvm_pageremove(pg->uobject, pg);
   1401 	}
   1402 
   1403 	/*
   1404 	 * put it in the new object
   1405 	 */
   1406 
   1407 	if (newobj) {
   1408 		pg->uobject = newobj;
   1409 		pg->offset = newoff;
   1410 		uvm_pageinsert(newobj, pg);
   1411 	}
   1412 }
   1413 
   1414 #ifdef DEBUG
   1415 /*
   1416  * check if page is zero-filled
   1417  *
   1418  *  - called with free page queue lock held.
   1419  */
   1420 void
   1421 uvm_pagezerocheck(struct vm_page *pg)
   1422 {
   1423 	int *p, *ep;
   1424 
   1425 	KASSERT(uvm_zerocheckkva != 0);
   1426 	KASSERT(mutex_owned(&uvm_fpageqlock));
   1427 
   1428 	/*
   1429 	 * XXX assuming pmap_kenter_pa and pmap_kremove never call
   1430 	 * uvm page allocator.
   1431 	 *
    1432 	 * it might be better to have a "CPU-local temporary map" pmap interface.
   1433 	 */
   1434 	pmap_kenter_pa(uvm_zerocheckkva, VM_PAGE_TO_PHYS(pg), VM_PROT_READ, 0);
   1435 	p = (int *)uvm_zerocheckkva;
   1436 	ep = (int *)((char *)p + PAGE_SIZE);
   1437 	pmap_update(pmap_kernel());
   1438 	while (p < ep) {
   1439 		if (*p != 0)
   1440 			panic("PG_ZERO page isn't zero-filled");
   1441 		p++;
   1442 	}
   1443 	pmap_kremove(uvm_zerocheckkva, PAGE_SIZE);
   1444 	/*
   1445 	 * pmap_update() is not necessary here because no one except us
   1446 	 * uses this VA.
   1447 	 */
   1448 }
   1449 #endif /* DEBUG */
   1450 
   1451 /*
   1452  * uvm_pagefree: free page
   1453  *
   1454  * => erase page's identity (i.e. remove from object)
   1455  * => put page on free list
   1456  * => caller must lock owning object (either anon or uvm_object)
   1457  * => caller must lock page queues
   1458  * => assumes all valid mappings of pg are gone
   1459  */
   1460 
   1461 void
   1462 uvm_pagefree(struct vm_page *pg)
   1463 {
   1464 	struct pgflist *pgfl;
   1465 	struct uvm_cpu *ucpu;
   1466 	int index, color, queue;
   1467 	bool iszero;
   1468 
   1469 #ifdef DEBUG
   1470 	if (pg->uobject == (void *)0xdeadbeef &&
   1471 	    pg->uanon == (void *)0xdeadbeef) {
   1472 		panic("uvm_pagefree: freeing free page %p", pg);
   1473 	}
   1474 #endif /* DEBUG */
   1475 
   1476 	KASSERT((pg->flags & PG_PAGEOUT) == 0);
   1477 	KASSERT(!(pg->pqflags & PQ_FREE));
   1478 	KASSERT(mutex_owned(&uvm_pageqlock) || !uvmpdpol_pageisqueued_p(pg));
   1479 	KASSERT(pg->uobject == NULL || mutex_owned(&pg->uobject->vmobjlock));
   1480 	KASSERT(pg->uobject != NULL || pg->uanon == NULL ||
   1481 		mutex_owned(&pg->uanon->an_lock));
   1482 
   1483 	/*
   1484 	 * if the page is loaned, resolve the loan instead of freeing.
   1485 	 */
   1486 
   1487 	if (pg->loan_count) {
   1488 		KASSERT(pg->wire_count == 0);
   1489 
   1490 		/*
   1491 		 * if the page is owned by an anon then we just want to
   1492 		 * drop anon ownership.  the kernel will free the page when
   1493 		 * it is done with it.  if the page is owned by an object,
   1494 		 * remove it from the object and mark it dirty for the benefit
   1495 		 * of possible anon owners.
   1496 		 *
   1497 		 * regardless of previous ownership, wakeup any waiters,
   1498 		 * unbusy the page, and we're done.
   1499 		 */
   1500 
   1501 		if (pg->uobject != NULL) {
   1502 			uvm_pageremove(pg->uobject, pg);
   1503 			pg->flags &= ~PG_CLEAN;
   1504 		} else if (pg->uanon != NULL) {
   1505 			if ((pg->pqflags & PQ_ANON) == 0) {
   1506 				pg->loan_count--;
   1507 			} else {
   1508 				pg->pqflags &= ~PQ_ANON;
   1509 				atomic_dec_uint(&uvmexp.anonpages);
   1510 			}
   1511 			pg->uanon->an_page = NULL;
   1512 			pg->uanon = NULL;
   1513 		}
   1514 		if (pg->flags & PG_WANTED) {
   1515 			wakeup(pg);
   1516 		}
   1517 		pg->flags &= ~(PG_WANTED|PG_BUSY|PG_RELEASED|PG_PAGER1);
   1518 #ifdef UVM_PAGE_TRKOWN
   1519 		pg->owner_tag = NULL;
   1520 #endif
   1521 		if (pg->loan_count) {
   1522 			KASSERT(pg->uobject == NULL);
   1523 			if (pg->uanon == NULL) {
   1524 				uvm_pagedequeue(pg);
   1525 			}
   1526 			return;
   1527 		}
   1528 	}
   1529 
   1530 	/*
   1531 	 * remove page from its object or anon.
   1532 	 */
   1533 
   1534 	if (pg->uobject != NULL) {
   1535 		uvm_pageremove(pg->uobject, pg);
   1536 	} else if (pg->uanon != NULL) {
   1537 		pg->uanon->an_page = NULL;
   1538 		atomic_dec_uint(&uvmexp.anonpages);
   1539 	}
   1540 
   1541 	/*
   1542 	 * now remove the page from the queues.
   1543 	 */
   1544 
   1545 	uvm_pagedequeue(pg);
   1546 
   1547 	/*
   1548 	 * if the page was wired, unwire it now.
   1549 	 */
   1550 
   1551 	if (pg->wire_count) {
   1552 		pg->wire_count = 0;
   1553 		uvmexp.wired--;
   1554 	}
   1555 
   1556 	/*
   1557 	 * and put on free queue
   1558 	 */
   1559 
   1560 	iszero = (pg->flags & PG_ZERO);
   1561 	index = uvm_page_lookup_freelist(pg);
   1562 	color = VM_PGCOLOR_BUCKET(pg);
   1563 	queue = (iszero ? PGFL_ZEROS : PGFL_UNKNOWN);
   1564 
   1565 #ifdef DEBUG
   1566 	pg->uobject = (void *)0xdeadbeef;
   1567 	pg->uanon = (void *)0xdeadbeef;
   1568 #endif
   1569 
   1570 	mutex_spin_enter(&uvm_fpageqlock);
   1571 	pg->pqflags = PQ_FREE;
   1572 
   1573 #ifdef DEBUG
   1574 	if (iszero)
   1575 		uvm_pagezerocheck(pg);
   1576 #endif /* DEBUG */
   1577 
   1578 
   1579 	/* global list */
   1580 	pgfl = &uvm.page_free[index].pgfl_buckets[color].pgfl_queues[queue];
   1581 	LIST_INSERT_HEAD(pgfl, pg, pageq.list);
   1582 	uvmexp.free++;
   1583 	if (iszero) {
   1584 		uvmexp.zeropages++;
   1585 	}
   1586 
   1587 	/* per-cpu list */
   1588 	ucpu = curcpu()->ci_data.cpu_uvm;
   1589 	pg->offset = (uintptr_t)ucpu;
   1590 	pgfl = &ucpu->page_free[index].pgfl_buckets[color].pgfl_queues[queue];
   1591 	LIST_INSERT_HEAD(pgfl, pg, listq.list);
   1592 	ucpu->pages[queue]++;
   1593 	if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
   1594 		ucpu->page_idle_zero = vm_page_zero_enable;
   1595 	}
   1596 
   1597 	mutex_spin_exit(&uvm_fpageqlock);
   1598 }
   1599 
   1600 /*
   1601  * uvm_page_unbusy: unbusy an array of pages.
   1602  *
   1603  * => pages must either all belong to the same object, or all belong to anons.
   1604  * => if pages are object-owned, object must be locked.
   1605  * => if pages are anon-owned, anons must be locked.
   1606  * => caller must lock page queues if pages may be released.
   1607  * => caller must make sure that anon-owned pages are not PG_RELEASED.
   1608  */
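
/*
 * a sketch of typical use at pager I/O completion; the caller takes
 * the object lock around the call, as required above:
 *
 *	mutex_enter(&uobj->vmobjlock);
 *	uvm_page_unbusy(pgs, npages);
 *	mutex_exit(&uobj->vmobjlock);
 */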
   1609 
   1610 void
   1611 uvm_page_unbusy(struct vm_page **pgs, int npgs)
   1612 {
   1613 	struct vm_page *pg;
   1614 	int i;
   1615 	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);
   1616 
   1617 	for (i = 0; i < npgs; i++) {
   1618 		pg = pgs[i];
   1619 		if (pg == NULL || pg == PGO_DONTCARE) {
   1620 			continue;
   1621 		}
   1622 
   1623 		KASSERT(pg->uobject == NULL ||
   1624 		    mutex_owned(&pg->uobject->vmobjlock));
   1625 		KASSERT(pg->uobject != NULL ||
   1626 		    (pg->uanon != NULL && mutex_owned(&pg->uanon->an_lock)));
   1627 
   1628 		KASSERT(pg->flags & PG_BUSY);
   1629 		KASSERT((pg->flags & PG_PAGEOUT) == 0);
   1630 		if (pg->flags & PG_WANTED) {
   1631 			wakeup(pg);
   1632 		}
   1633 		if (pg->flags & PG_RELEASED) {
   1634 			UVMHIST_LOG(ubchist, "releasing pg %p", pg,0,0,0);
   1635 			KASSERT(pg->uobject != NULL ||
   1636 			    (pg->uanon != NULL && pg->uanon->an_ref > 0));
   1637 			pg->flags &= ~PG_RELEASED;
   1638 			uvm_pagefree(pg);
   1639 		} else {
   1640 			UVMHIST_LOG(ubchist, "unbusying pg %p", pg,0,0,0);
   1641 			KASSERT((pg->flags & PG_FAKE) == 0);
   1642 			pg->flags &= ~(PG_WANTED|PG_BUSY);
   1643 			UVM_PAGE_OWN(pg, NULL);
   1644 		}
   1645 	}
   1646 }
   1647 
   1648 #if defined(UVM_PAGE_TRKOWN)
   1649 /*
   1650  * uvm_page_own: set or release page ownership
   1651  *
   1652  * => this is a debugging function that keeps track of who sets PG_BUSY
   1653  *	and where they do it.   it can be used to track down problems
    1654  *	such as a process setting "PG_BUSY" and never releasing it.
   1655  * => page's object [if any] must be locked
   1656  * => if "tag" is NULL then we are releasing page ownership
   1657  */
   1658 void
   1659 uvm_page_own(struct vm_page *pg, const char *tag)
   1660 {
   1661 	struct uvm_object *uobj;
   1662 	struct vm_anon *anon;
   1663 
   1664 	KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
   1665 
   1666 	uobj = pg->uobject;
   1667 	anon = pg->uanon;
   1668 	if (uobj != NULL) {
   1669 		KASSERT(mutex_owned(&uobj->vmobjlock));
   1670 	} else if (anon != NULL) {
   1671 		KASSERT(mutex_owned(&anon->an_lock));
   1672 	}
   1673 
   1674 	KASSERT((pg->flags & PG_WANTED) == 0);
   1675 
   1676 	/* gain ownership? */
   1677 	if (tag) {
   1678 		KASSERT((pg->flags & PG_BUSY) != 0);
   1679 		if (pg->owner_tag) {
   1680 			printf("uvm_page_own: page %p already owned "
   1681 			    "by proc %d [%s]\n", pg,
   1682 			    pg->owner, pg->owner_tag);
   1683 			panic("uvm_page_own");
   1684 		}
   1685 		pg->owner = (curproc) ? curproc->p_pid :  (pid_t) -1;
   1686 		pg->lowner = (curlwp) ? curlwp->l_lid :  (lwpid_t) -1;
   1687 		pg->owner_tag = tag;
   1688 		return;
   1689 	}
   1690 
   1691 	/* drop ownership */
   1692 	KASSERT((pg->flags & PG_BUSY) == 0);
   1693 	if (pg->owner_tag == NULL) {
   1694 		printf("uvm_page_own: dropping ownership of an non-owned "
   1695 		    "page (%p)\n", pg);
   1696 		panic("uvm_page_own");
   1697 	}
   1698 	if (!uvmpdpol_pageisqueued_p(pg)) {
   1699 		KASSERT((pg->uanon == NULL && pg->uobject == NULL) ||
   1700 		    pg->wire_count > 0);
   1701 	} else {
   1702 		KASSERT(pg->wire_count == 0);
   1703 	}
   1704 	pg->owner_tag = NULL;
   1705 }
   1706 #endif
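
/*
 * Example (illustrative sketch only): code that marks a page busy for a
 * while typically records ownership through the UVM_PAGE_OWN() wrapper,
 * which expands to uvm_page_own() only when UVM_PAGE_TRKOWN is defined.
 * The tag string below is hypothetical.
 */
#if 0
	/* ... with pg's object or anon locked ... */
	pg->flags |= PG_BUSY;
	UVM_PAGE_OWN(pg, "example_getpage");
	/* ... do work while the page is busy ... */
	pg->flags &= ~PG_BUSY;
	UVM_PAGE_OWN(pg, NULL);
#endif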

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => try to complete one color bucket at a time, to reduce our impact
 *	on the CPU cache.
 * => we loop until we either reach the target or there is an lwp ready
 *	to run, or MD code detects a reason to break early.
 */
void
uvm_pageidlezero(void)
{
	struct vm_page *pg;
	struct pgfreelist *pgfl, *gpgfl;
	struct uvm_cpu *ucpu;
	int free_list, firstbucket, nextbucket;

	ucpu = curcpu()->ci_data.cpu_uvm;
	if (!ucpu->page_idle_zero ||
	    ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) {
		ucpu->page_idle_zero = false;
		return;
	}
	mutex_enter(&uvm_fpageqlock);
	firstbucket = ucpu->page_free_nextcolor;
	nextbucket = firstbucket;
	do {
		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			if (sched_curcpu_runnable_p()) {
				goto quit;
			}
			pgfl = &ucpu->page_free[free_list];
			gpgfl = &uvm.page_free[free_list];
			while ((pg = LIST_FIRST(&pgfl->pgfl_buckets[
			    nextbucket].pgfl_queues[PGFL_UNKNOWN])) != NULL) {
				if (sched_curcpu_runnable_p()) {
					goto quit;
				}
				LIST_REMOVE(pg, pageq.list); /* global list */
				LIST_REMOVE(pg, listq.list); /* per-cpu list */
				ucpu->pages[PGFL_UNKNOWN]--;
				uvmexp.free--;
				KASSERT(pg->pqflags == PQ_FREE);
				pg->pqflags = 0;
				mutex_spin_exit(&uvm_fpageqlock);
#ifdef PMAP_PAGEIDLEZERO
				if (!PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg))) {

					/*
					 * The machine-dependent code detected
					 * some reason for us to abort zeroing
					 * pages, probably because there is a
					 * process now ready to run.
					 */

					mutex_spin_enter(&uvm_fpageqlock);
					pg->pqflags = PQ_FREE;
					LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[
					    nextbucket].pgfl_queues[
					    PGFL_UNKNOWN], pg, pageq.list);
					LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
					    nextbucket].pgfl_queues[
					    PGFL_UNKNOWN], pg, listq.list);
					ucpu->pages[PGFL_UNKNOWN]++;
					uvmexp.free++;
					uvmexp.zeroaborts++;
					goto quit;
				}
#else
				pmap_zero_page(VM_PAGE_TO_PHYS(pg));
#endif /* PMAP_PAGEIDLEZERO */
				pg->flags |= PG_ZERO;

				mutex_spin_enter(&uvm_fpageqlock);
				pg->pqflags = PQ_FREE;
				LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[
				    nextbucket].pgfl_queues[PGFL_ZEROS],
				    pg, pageq.list);
				LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
				    nextbucket].pgfl_queues[PGFL_ZEROS],
				    pg, listq.list);
				ucpu->pages[PGFL_ZEROS]++;
				uvmexp.free++;
				uvmexp.zeropages++;
			}
		}
		if (ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) {
			break;
		}
		nextbucket = (nextbucket + 1) & uvmexp.colormask;
	} while (nextbucket != firstbucket);
	ucpu->page_idle_zero = false;
 quit:
	mutex_spin_exit(&uvm_fpageqlock);
}
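
/*
 * Example (illustrative sketch only): the MI idle loop is the expected
 * caller, invoking this opportunistically with no locks held and relying
 * on the function to bail out as soon as a runnable lwp appears.  This
 * sketch is schematic, not a copy of the real idle loop.
 */
#if 0
	while (!sched_curcpu_runnable_p()) {
		uvm_pageidlezero();	/* cheap no-op once target is met */
		/* ... other idle-time work, cpu_idle(), etc. ... */
	}
#endif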

/*
 * uvm_pagelookup: look up a page
 *
 * => caller should lock object to keep someone from pulling the page
 *	out from under it
 */

struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
	struct vm_page *pg;

	KASSERT(mutex_owned(&obj->vmobjlock));

	pg = rb_tree_find_node(&obj->rb_tree, &off);

	KASSERT(pg == NULL || obj->uo_npages != 0);
	KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		(pg->flags & PG_BUSY) != 0);
	return pg;
}
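
/*
 * Example (illustrative sketch only): looking up the page backing a given
 * object offset.  Pages are indexed by their exact pg->offset value, so
 * the offset is truncated to a page boundary first.  "uobj" and "off" are
 * hypothetical.
 */
#if 0
	struct vm_page *pg;

	mutex_enter(&uobj->vmobjlock);
	pg = uvm_pagelookup(uobj, trunc_page(off));
	if (pg != NULL) {
		/* ... use pg while the object lock is held ... */
	}
	mutex_exit(&uobj->vmobjlock);
#endif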

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */

void
uvm_pagewire(struct vm_page *pg)
{
	KASSERT(mutex_owned(&uvm_pageqlock));
#if defined(READAHEAD_STATS)
	if ((pg->pqflags & PQ_READAHEAD) != 0) {
		uvm_ra_hit.ev_count++;
		pg->pqflags &= ~PQ_READAHEAD;
	}
#endif /* defined(READAHEAD_STATS) */
	if (pg->wire_count == 0) {
		uvm_pagedequeue(pg);
		uvmexp.wired++;
	}
	pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */

void
uvm_pageunwire(struct vm_page *pg)
{
	KASSERT(mutex_owned(&uvm_pageqlock));
	pg->wire_count--;
	if (pg->wire_count == 0) {
		uvm_pageactivate(pg);
		uvmexp.wired--;
	}
}
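
/*
 * Example (illustrative sketch only): wiring a page for the duration of
 * an operation that must not lose the page to the pagedaemon, then
 * unwiring it.  Wire counts nest, so the calls must be paired.  "pg" is
 * hypothetical.
 */
#if 0
	mutex_enter(&uvm_pageqlock);
	uvm_pagewire(pg);		/* page leaves the paging queues */
	mutex_exit(&uvm_pageqlock);

	/* ... access the page without risk of it being paged out ... */

	mutex_enter(&uvm_pageqlock);
	uvm_pageunwire(pg);		/* reactivated when count hits 0 */
	mutex_exit(&uvm_pageqlock);
#endif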

/*
 * uvm_pagedeactivate: deactivate page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 * => caller must clear the reference on the page before calling
 */

void
uvm_pagedeactivate(struct vm_page *pg)
{

	KASSERT(mutex_owned(&uvm_pageqlock));
	KASSERT(pg->wire_count != 0 || uvmpdpol_pageisqueued_p(pg));
	uvmpdpol_pagedeactivate(pg);
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */

void
uvm_pageactivate(struct vm_page *pg)
{

	KASSERT(mutex_owned(&uvm_pageqlock));
#if defined(READAHEAD_STATS)
	if ((pg->pqflags & PQ_READAHEAD) != 0) {
		uvm_ra_hit.ev_count++;
		pg->pqflags &= ~PQ_READAHEAD;
	}
#endif /* defined(READAHEAD_STATS) */
	if (pg->wire_count != 0) {
		return;
	}
	uvmpdpol_pageactivate(pg);
}
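
/*
 * Example (illustrative sketch only): deactivating a page so the
 * pagedaemon considers it for reclaim sooner.  Per the contract above,
 * the owning object must also be locked, the page must not be wired, and
 * the reference is cleared first.  "pg" is hypothetical.
 */
#if 0
	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count == 0) {
		pmap_clear_reference(pg);	/* drop the referenced bit */
		uvm_pagedeactivate(pg);
	}
	mutex_exit(&uvm_pageqlock);
#endif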

/*
 * uvm_pagedequeue: remove a page from any paging queue
 */

void
uvm_pagedequeue(struct vm_page *pg)
{

	if (uvmpdpol_pageisqueued_p(pg)) {
		KASSERT(mutex_owned(&uvm_pageqlock));
	}

	uvmpdpol_pagedequeue(pg);
}

/*
 * uvm_pageenqueue: add a page to a paging queue without activating.
 * used where a page is not really demanded (yet), e.g. read-ahead.
 */

void
uvm_pageenqueue(struct vm_page *pg)
{

	KASSERT(mutex_owned(&uvm_pageqlock));
	if (pg->wire_count != 0) {
		return;
	}
	uvmpdpol_pageenqueue(pg);
}
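
/*
 * Example (illustrative sketch only): read-ahead code might enqueue pages
 * it has speculatively filled, making them visible to the pagedaemon
 * without treating them as recently demanded.  "pgs" and "npages" are
 * hypothetical.
 */
#if 0
	int i;

	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		uvm_pageenqueue(pgs[i]);
	}
	mutex_exit(&uvm_pageqlock);
#endif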

/*
 * uvm_pagezero: zero fill a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */

void
uvm_pagezero(struct vm_page *pg)
{
	pg->flags &= ~PG_CLEAN;
	pmap_zero_page(VM_PAGE_TO_PHYS(pg));
}

/*
 * uvm_pagecopy: copy a page
 *
 * => if page is part of an object then the object should be locked
 *	to protect pg->flags.
 */

void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{

	dst->flags &= ~PG_CLEAN;
	pmap_copy_page(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
}
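
/*
 * Example (illustrative sketch only): copy-on-write style duplication of
 * a page into a freshly allocated one.  The copy clears PG_CLEAN on the
 * destination, marking it dirty.  "uobj", "off" and "srcpg" are
 * hypothetical; error handling and PG_BUSY bookkeeping on the new page
 * are elided.
 */
#if 0
	struct vm_page *dstpg;

	mutex_enter(&uobj->vmobjlock);
	dstpg = uvm_pagealloc(uobj, off, NULL, 0);
	if (dstpg != NULL) {
		uvm_pagecopy(srcpg, dstpg);	/* dstpg becomes dirty */
	}
	mutex_exit(&uobj->vmobjlock);
#endif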

/*
 * uvm_pageismanaged: test whether a page (specified by PA) is managed.
 */

bool
uvm_pageismanaged(paddr_t pa)
{

	return (vm_physseg_find(atop(pa), NULL) != -1);
}

/*
 * uvm_page_lookup_freelist: look up the free list for the specified page
 */

int
uvm_page_lookup_freelist(struct vm_page *pg)
{
	int lcv;

	lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
	KASSERT(lcv != -1);
	return (VM_PHYSMEM_PTR(lcv)->free_list);
}
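
/*
 * Example (illustrative sketch only): MD code deciding whether a physical
 * address handed to it belongs to a vm_page managed by UVM, e.g. before a
 * PHYS_TO_VM_PAGE() conversion.  "pa" is hypothetical.
 */
#if 0
	if (uvm_pageismanaged(pa)) {
		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);

		/* ... operate on the managed page ... */
	}
#endif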

#if defined(DDB) || defined(DEBUGPRINT)

/*
 * uvm_page_printit: actually print the page
 */

static const char page_flagbits[] = UVM_PGFLAGBITS;
static const char page_pqflagbits[] = UVM_PQFLAGBITS;

void
uvm_page_printit(struct vm_page *pg, bool full,
    void (*pr)(const char *, ...))
{
	struct vm_page *tpg;
	struct uvm_object *uobj;
	struct pgflist *pgl;
	char pgbuf[128];
	char pqbuf[128];

	(*pr)("PAGE %p:\n", pg);
	snprintb(pgbuf, sizeof(pgbuf), page_flagbits, pg->flags);
	snprintb(pqbuf, sizeof(pqbuf), page_pqflagbits, pg->pqflags);
	(*pr)("  flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n",
	    pgbuf, pqbuf, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg));
	(*pr)("  uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
	    pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
#if defined(UVM_PAGE_TRKOWN)
	if (pg->flags & PG_BUSY)
		(*pr)("  owning process = %d, tag=%s\n",
		    pg->owner, pg->owner_tag);
	else
		(*pr)("  page not busy, no owner\n");
#else
	(*pr)("  [page ownership tracking disabled]\n");
#endif

	if (!full)
		return;

	/* cross-verify object/anon */
	if ((pg->pqflags & PQ_FREE) == 0) {
		if (pg->pqflags & PQ_ANON) {
			if (pg->uanon == NULL || pg->uanon->an_page != pg)
			    (*pr)("  >>> ANON DOES NOT POINT HERE <<< (%p)\n",
				(pg->uanon) ? pg->uanon->an_page : NULL);
			else
				(*pr)("  anon backpointer is OK\n");
		} else {
			uobj = pg->uobject;
			if (uobj) {
				(*pr)("  checking object list\n");
				TAILQ_FOREACH(tpg, &uobj->memq, listq.queue) {
					if (tpg == pg) {
						break;
					}
				}
				if (tpg)
					(*pr)("  page found on object list\n");
				else
			(*pr)("  >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
			}
		}
	}

	/* cross-verify page queue */
	if (pg->pqflags & PQ_FREE) {
		int fl = uvm_page_lookup_freelist(pg);
		int color = VM_PGCOLOR_BUCKET(pg);
		pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
		    ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
	} else {
		pgl = NULL;
	}

	if (pgl) {
		(*pr)("  checking pageq list\n");
		LIST_FOREACH(tpg, pgl, pageq.list) {
			if (tpg == pg) {
				break;
			}
		}
		if (tpg)
			(*pr)("  page found on pageq list\n");
		else
			(*pr)("  >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
	}
}

/*
 * uvm_page_printall: print a summary of all managed pages
 */

void
uvm_page_printall(void (*pr)(const char *, ...))
{
	unsigned i;
	struct vm_page *pg;

	(*pr)("%18s %4s %4s %18s %18s"
#ifdef UVM_PAGE_TRKOWN
	    " OWNER"
#endif
	    "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
	for (i = 0; i < vm_nphysmem; i++) {
		for (pg = VM_PHYSMEM_PTR(i)->pgs; pg < VM_PHYSMEM_PTR(i)->lastpg; pg++) {
			(*pr)("%18p %04x %04x %18p %18p",
			    pg, pg->flags, pg->pqflags, pg->uobject,
			    pg->uanon);
#ifdef UVM_PAGE_TRKOWN
			if (pg->flags & PG_BUSY)
				(*pr)(" %d [%s]", pg->owner, pg->owner_tag);
#endif
			(*pr)("\n");
		}
	}
}

#endif /* DDB || DEBUGPRINT */