uvm_page.c revision 1.189
      1 /*	$NetBSD: uvm_page.c,v 1.189 2016/12/22 16:05:15 cherry Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1997 Charles D. Cranor and Washington University.
      5  * Copyright (c) 1991, 1993, The Regents of the University of California.
      6  *
      7  * All rights reserved.
      8  *
      9  * This code is derived from software contributed to Berkeley by
     10  * The Mach Operating System project at Carnegie-Mellon University.
     11  *
     12  * Redistribution and use in source and binary forms, with or without
     13  * modification, are permitted provided that the following conditions
     14  * are met:
     15  * 1. Redistributions of source code must retain the above copyright
     16  *    notice, this list of conditions and the following disclaimer.
     17  * 2. Redistributions in binary form must reproduce the above copyright
     18  *    notice, this list of conditions and the following disclaimer in the
     19  *    documentation and/or other materials provided with the distribution.
     20  * 3. Neither the name of the University nor the names of its contributors
     21  *    may be used to endorse or promote products derived from this software
     22  *    without specific prior written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     34  * SUCH DAMAGE.
     35  *
     36  *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
     37  * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
     38  *
     39  *
     40  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
     41  * All rights reserved.
     42  *
     43  * Permission to use, copy, modify and distribute this software and
     44  * its documentation is hereby granted, provided that both the copyright
     45  * notice and this permission notice appear in all copies of the
     46  * software, derivative works or modified versions, and any portions
     47  * thereof, and that both notices appear in supporting documentation.
     48  *
     49  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     50  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     51  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     52  *
     53  * Carnegie Mellon requests users of this software to return to
     54  *
      55  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     56  *  School of Computer Science
     57  *  Carnegie Mellon University
     58  *  Pittsburgh PA 15213-3890
     59  *
     60  * any improvements or extensions that they make and grant Carnegie the
     61  * rights to redistribute these changes.
     62  */
     63 
     64 /*
     65  * uvm_page.c: page ops.
     66  */
     67 
     68 #include <sys/cdefs.h>
     69 __KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.189 2016/12/22 16:05:15 cherry Exp $");
     70 
     71 #include "opt_ddb.h"
     72 #include "opt_uvm.h"
     73 #include "opt_uvmhist.h"
     74 #include "opt_readahead.h"
     75 
     76 #include <sys/param.h>
     77 #include <sys/systm.h>
     78 #include <sys/sched.h>
     79 #include <sys/kernel.h>
     80 #include <sys/vnode.h>
     81 #include <sys/proc.h>
     82 #include <sys/atomic.h>
     83 #include <sys/cpu.h>
     84 
     85 #include <uvm/uvm.h>
     86 #include <uvm/uvm_ddb.h>
     87 #include <uvm/uvm_pdpolicy.h>
     88 
     89 /*
     90  * global vars... XXXCDC: move to uvm. structure.
     91  */
     92 
     93 /*
     94  * physical memory config is stored in vm_physmem.
     95  */
     96 
     97 struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
     98 int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */
     99 #define	vm_nphysmem	vm_nphysseg
    100 
    101 /*
     102  * Some CPUs supported by a given architecture lack the features
     103  * needed to do idle page zero'ing efficiently.
    104  * We therefore provide a way to enable it from machdep code here.
    105  */
    106 bool vm_page_zero_enable = false;
    107 
    108 /*
    109  * number of pages per-CPU to reserve for the kernel.
    110  */
    111 #ifndef	UVM_RESERVED_PAGES_PER_CPU
    112 #define	UVM_RESERVED_PAGES_PER_CPU	5
    113 #endif
    114 int vm_page_reserve_kernel = UVM_RESERVED_PAGES_PER_CPU;
    115 
    116 /*
     117  * physical memory size.
    118  */
    119 psize_t physmem;
    120 
    121 /*
    122  * local variables
    123  */
    124 
    125 /*
     126  * these variables record the kernel virtual address range returned
     127  * by pmap_virtual_space(); uvm_pageboot_alloc() uses them internally
     128  * and uvm_page_init() passes them up to the upper layers of the VM.
    129  */
    130 
    131 static vaddr_t      virtual_space_start;
    132 static vaddr_t      virtual_space_end;
    133 
    134 /*
    135  * we allocate an initial number of page colors in uvm_page_init(),
    136  * and remember them.  We may re-color pages as cache sizes are
    137  * discovered during the autoconfiguration phase.  But we can never
    138  * free the initial set of buckets, since they are allocated using
    139  * uvm_pageboot_alloc().
    140  */
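
/*
 * Illustrative example (hypothetical cache geometry): with a 1MB
 * direct-mapped cache and 4KB pages, MD cache-probe code would call
 * uvm_page_recolor(256) once the geometry is known, so that free pages
 * are handed out spread evenly across the cache.
 */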
    141 
    142 static size_t recolored_pages_memsize /* = 0 */;
    143 
    144 #ifdef DEBUG
    145 vaddr_t uvm_zerocheckkva;
    146 #endif /* DEBUG */
    147 
    148 /*
    149  * local prototypes
    150  */
    151 
    152 static void uvm_pageinsert(struct uvm_object *, struct vm_page *);
    153 static void uvm_pageremove(struct uvm_object *, struct vm_page *);
    154 
    155 /*
    156  * per-object tree of pages
    157  */
    158 
    159 static signed int
    160 uvm_page_compare_nodes(void *ctx, const void *n1, const void *n2)
    161 {
    162 	const struct vm_page *pg1 = n1;
    163 	const struct vm_page *pg2 = n2;
    164 	const voff_t a = pg1->offset;
    165 	const voff_t b = pg2->offset;
    166 
    167 	if (a < b)
    168 		return -1;
    169 	if (a > b)
    170 		return 1;
    171 	return 0;
    172 }
    173 
    174 static signed int
    175 uvm_page_compare_key(void *ctx, const void *n, const void *key)
    176 {
    177 	const struct vm_page *pg = n;
    178 	const voff_t a = pg->offset;
    179 	const voff_t b = *(const voff_t *)key;
    180 
    181 	if (a < b)
    182 		return -1;
    183 	if (a > b)
    184 		return 1;
    185 	return 0;
    186 }
    187 
    188 const rb_tree_ops_t uvm_page_tree_ops = {
    189 	.rbto_compare_nodes = uvm_page_compare_nodes,
    190 	.rbto_compare_key = uvm_page_compare_key,
    191 	.rbto_node_offset = offsetof(struct vm_page, rb_node),
    192 	.rbto_context = NULL
    193 };
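
/*
 * With these ops a page can be looked up in its object by offset; a
 * minimal sketch of what uvm_pagelookup() does (object lock held by
 * the caller, "wanted_offset" is a made-up name):
 *
 *	voff_t off = trunc_page(wanted_offset);
 *	struct vm_page *pg = rb_tree_find_node(&uobj->rb_tree, &off);
 */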
    194 
    195 /*
    196  * inline functions
    197  */
    198 
    199 /*
    200  * uvm_pageinsert: insert a page in the object.
    201  *
    202  * => caller must lock object
    203  * => caller must lock page queues
     204  * => caller should have already set pg's object and offset pointers
    205  *    and bumped the version counter
    206  */
    207 
    208 static inline void
    209 uvm_pageinsert_list(struct uvm_object *uobj, struct vm_page *pg,
    210     struct vm_page *where)
    211 {
    212 
    213 	KASSERT(uobj == pg->uobject);
    214 	KASSERT(mutex_owned(uobj->vmobjlock));
    215 	KASSERT((pg->flags & PG_TABLED) == 0);
    216 	KASSERT(where == NULL || (where->flags & PG_TABLED));
    217 	KASSERT(where == NULL || (where->uobject == uobj));
    218 
    219 	if (UVM_OBJ_IS_VNODE(uobj)) {
    220 		if (uobj->uo_npages == 0) {
    221 			struct vnode *vp = (struct vnode *)uobj;
    222 
    223 			vholdl(vp);
    224 		}
    225 		if (UVM_OBJ_IS_VTEXT(uobj)) {
    226 			atomic_inc_uint(&uvmexp.execpages);
    227 		} else {
    228 			atomic_inc_uint(&uvmexp.filepages);
    229 		}
    230 	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
    231 		atomic_inc_uint(&uvmexp.anonpages);
    232 	}
    233 
    234 	if (where)
    235 		TAILQ_INSERT_AFTER(&uobj->memq, where, pg, listq.queue);
    236 	else
    237 		TAILQ_INSERT_TAIL(&uobj->memq, pg, listq.queue);
    238 	pg->flags |= PG_TABLED;
    239 	uobj->uo_npages++;
    240 }
    241 
    242 
    243 static inline void
    244 uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
    245 {
    246 	struct vm_page *ret __diagused;
    247 
    248 	KASSERT(uobj == pg->uobject);
    249 	ret = rb_tree_insert_node(&uobj->rb_tree, pg);
    250 	KASSERT(ret == pg);
    251 }
    252 
    253 static inline void
    254 uvm_pageinsert(struct uvm_object *uobj, struct vm_page *pg)
    255 {
    256 
    257 	KDASSERT(uobj != NULL);
    258 	uvm_pageinsert_tree(uobj, pg);
    259 	uvm_pageinsert_list(uobj, pg, NULL);
    260 }
    261 
    262 /*
     263  * uvm_pageremove: remove page from object.
    264  *
    265  * => caller must lock object
    266  * => caller must lock page queues
    267  */
    268 
    269 static inline void
    270 uvm_pageremove_list(struct uvm_object *uobj, struct vm_page *pg)
    271 {
    272 
    273 	KASSERT(uobj == pg->uobject);
    274 	KASSERT(mutex_owned(uobj->vmobjlock));
    275 	KASSERT(pg->flags & PG_TABLED);
    276 
    277 	if (UVM_OBJ_IS_VNODE(uobj)) {
    278 		if (uobj->uo_npages == 1) {
    279 			struct vnode *vp = (struct vnode *)uobj;
    280 
    281 			holdrelel(vp);
    282 		}
    283 		if (UVM_OBJ_IS_VTEXT(uobj)) {
    284 			atomic_dec_uint(&uvmexp.execpages);
    285 		} else {
    286 			atomic_dec_uint(&uvmexp.filepages);
    287 		}
    288 	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
    289 		atomic_dec_uint(&uvmexp.anonpages);
    290 	}
    291 
    292 	/* object should be locked */
    293 	uobj->uo_npages--;
    294 	TAILQ_REMOVE(&uobj->memq, pg, listq.queue);
    295 	pg->flags &= ~PG_TABLED;
    296 	pg->uobject = NULL;
    297 }
    298 
    299 static inline void
    300 uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg)
    301 {
    302 
    303 	KASSERT(uobj == pg->uobject);
    304 	rb_tree_remove_node(&uobj->rb_tree, pg);
    305 }
    306 
    307 static inline void
    308 uvm_pageremove(struct uvm_object *uobj, struct vm_page *pg)
    309 {
    310 
    311 	KDASSERT(uobj != NULL);
    312 	uvm_pageremove_tree(uobj, pg);
    313 	uvm_pageremove_list(uobj, pg);
    314 }
    315 
    316 static void
    317 uvm_page_init_buckets(struct pgfreelist *pgfl)
    318 {
    319 	int color, i;
    320 
    321 	for (color = 0; color < uvmexp.ncolors; color++) {
    322 		for (i = 0; i < PGFL_NQUEUES; i++) {
    323 			LIST_INIT(&pgfl->pgfl_buckets[color].pgfl_queues[i]);
    324 		}
    325 	}
    326 }
    327 
    328 /*
    329  * uvm_page_init: init the page system.   called from uvm_init().
    330  *
    331  * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
    332  */
    333 
    334 void
    335 uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
    336 {
    337 	static struct uvm_cpu boot_cpu;
    338 	psize_t freepages, pagecount, bucketcount, n;
    339 	struct pgflbucket *bucketarray, *cpuarray;
    340 	struct vm_physseg *seg;
    341 	struct vm_page *pagearray;
    342 	int lcv;
    343 	u_int i;
    344 	paddr_t paddr;
    345 
    346 	KASSERT(ncpu <= 1);
    347 	CTASSERT(sizeof(pagearray->offset) >= sizeof(struct uvm_cpu *));
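	/*
	 * The CTASSERT above matters because pg->offset is reused to record
	 * the owning CPU while a page sits on a free list; see the
	 * "pg->offset = (uintptr_t)ucpu" assignment in uvm_pagefree() and
	 * the VM_FREE_PAGE_TO_CPU() uses in uvm_pagealloc_pgfl().
	 */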
    348 
    349 	/*
    350 	 * init the page queues and page queue locks, except the free
    351 	 * list; we allocate that later (with the initial vm_page
    352 	 * structures).
    353 	 */
    354 
    355 	uvm.cpus[0] = &boot_cpu;
    356 	curcpu()->ci_data.cpu_uvm = &boot_cpu;
    357 	uvmpdpol_init();
    358 	mutex_init(&uvm_pageqlock, MUTEX_DRIVER, IPL_NONE);
    359 	mutex_init(&uvm_fpageqlock, MUTEX_DRIVER, IPL_VM);
    360 
    361 	/*
    362 	 * allocate vm_page structures.
    363 	 */
    364 
    365 	/*
    366 	 * sanity check:
    367 	 * before calling this function the MD code is expected to register
    368 	 * some free RAM with the uvm_page_physload() function.   our job
    369 	 * now is to allocate vm_page structures for this memory.
    370 	 */
    371 
    372 	if (vm_nphysmem == 0)
    373 		panic("uvm_page_bootstrap: no memory pre-allocated");
    374 
    375 	/*
    376 	 * first calculate the number of free pages...
    377 	 *
    378 	 * note that we use start/end rather than avail_start/avail_end.
    379 	 * this allows us to allocate extra vm_page structures in case we
    380 	 * want to return some memory to the pool after booting.
    381 	 */
    382 
    383 	freepages = 0;
    384 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
    385 		seg = VM_PHYSMEM_PTR(lcv);
    386 		freepages += (seg->end - seg->start);
    387 	}
    388 
    389 	/*
    390 	 * Let MD code initialize the number of colors, or default
    391 	 * to 1 color if MD code doesn't care.
    392 	 */
    393 	if (uvmexp.ncolors == 0)
    394 		uvmexp.ncolors = 1;
    395 	uvmexp.colormask = uvmexp.ncolors - 1;
    396 	KASSERT((uvmexp.colormask & uvmexp.ncolors) == 0);
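	/*
	 * e.g. ncolors == 8 gives colormask == 0x7 and (0x7 & 8) == 0;
	 * a non-power-of-two count such as 6 would leave a common bit set
	 * ((0x5 & 6) != 0) and trip the assertion.
	 */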
    397 
    398 	/*
    399 	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
    400 	 * use.   for each page of memory we use we need a vm_page structure.
    401 	 * thus, the total number of pages we can use is the total size of
    402 	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
    403 	 * structure.   we add one to freepages as a fudge factor to avoid
    404 	 * truncation errors (since we can only allocate in terms of whole
    405 	 * pages).
    406 	 */
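	/*
	 * Worked example (illustrative sizes only): with 4KB pages and a
	 * vm_page of roughly 100 bytes, 1000 pages of raw RAM yield
	 * (1001 * 4096) / (4096 + 100) ~= 977 usable pages; the rest of
	 * the RAM is consumed by the vm_page array itself.
	 */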
    407 
    408 	bucketcount = uvmexp.ncolors * VM_NFREELIST;
    409 	pagecount = ((freepages + 1) << PAGE_SHIFT) /
    410 	    (PAGE_SIZE + sizeof(struct vm_page));
    411 
    412 	bucketarray = (void *)uvm_pageboot_alloc((bucketcount *
    413 	    sizeof(struct pgflbucket) * 2) + (pagecount *
    414 	    sizeof(struct vm_page)));
    415 	cpuarray = bucketarray + bucketcount;
    416 	pagearray = (struct vm_page *)(bucketarray + bucketcount * 2);
    417 
    418 	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
    419 		uvm.page_free[lcv].pgfl_buckets =
    420 		    (bucketarray + (lcv * uvmexp.ncolors));
    421 		uvm_page_init_buckets(&uvm.page_free[lcv]);
    422 		uvm.cpus[0]->page_free[lcv].pgfl_buckets =
    423 		    (cpuarray + (lcv * uvmexp.ncolors));
    424 		uvm_page_init_buckets(&uvm.cpus[0]->page_free[lcv]);
    425 	}
    426 	memset(pagearray, 0, pagecount * sizeof(struct vm_page));
    427 
    428 	/*
    429 	 * init the vm_page structures and put them in the correct place.
    430 	 */
    431 
    432 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
    433 		seg = VM_PHYSMEM_PTR(lcv);
    434 		n = seg->end - seg->start;
    435 
    436 		/* set up page array pointers */
    437 		seg->pgs = pagearray;
    438 		pagearray += n;
    439 		pagecount -= n;
    440 		seg->lastpg = seg->pgs + n;
    441 
    442 		/* init and free vm_pages (we've already zeroed them) */
    443 		paddr = ctob(seg->start);
    444 		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
    445 			seg->pgs[i].phys_addr = paddr;
    446 #ifdef __HAVE_VM_PAGE_MD
    447 			VM_MDPAGE_INIT(&seg->pgs[i]);
    448 #endif
    449 			if (atop(paddr) >= seg->avail_start &&
    450 			    atop(paddr) < seg->avail_end) {
    451 				uvmexp.npages++;
    452 				/* add page to free pool */
    453 				uvm_pagefree(&seg->pgs[i]);
    454 			}
    455 		}
    456 	}
    457 
    458 	/*
    459 	 * pass up the values of virtual_space_start and
    460 	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
    461 	 * layers of the VM.
    462 	 */
    463 
    464 	*kvm_startp = round_page(virtual_space_start);
    465 	*kvm_endp = trunc_page(virtual_space_end);
    466 #ifdef DEBUG
    467 	/*
    468 	 * steal kva for uvm_pagezerocheck().
    469 	 */
    470 	uvm_zerocheckkva = *kvm_startp;
    471 	*kvm_startp += PAGE_SIZE;
    472 #endif /* DEBUG */
    473 
    474 	/*
    475 	 * init various thresholds.
    476 	 */
    477 
    478 	uvmexp.reserve_pagedaemon = 1;
    479 	uvmexp.reserve_kernel = vm_page_reserve_kernel;
    480 
    481 	/*
    482 	 * determine if we should zero pages in the idle loop.
    483 	 */
    484 
    485 	uvm.cpus[0]->page_idle_zero = vm_page_zero_enable;
    486 
    487 	/*
    488 	 * done!
    489 	 */
    490 
    491 	uvm.page_init_done = true;
    492 }
    493 
    494 /*
    495  * uvm_setpagesize: set the page size
    496  *
    497  * => sets page_shift and page_mask from uvmexp.pagesize.
    498  */
    499 
    500 void
    501 uvm_setpagesize(void)
    502 {
    503 
    504 	/*
    505 	 * If uvmexp.pagesize is 0 at this point, we expect PAGE_SIZE
    506 	 * to be a constant (indicated by being a non-zero value).
    507 	 */
    508 	if (uvmexp.pagesize == 0) {
    509 		if (PAGE_SIZE == 0)
    510 			panic("uvm_setpagesize: uvmexp.pagesize not set");
    511 		uvmexp.pagesize = PAGE_SIZE;
    512 	}
    513 	uvmexp.pagemask = uvmexp.pagesize - 1;
    514 	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
    515 		panic("uvm_setpagesize: page size %u (%#x) not a power of two",
    516 		    uvmexp.pagesize, uvmexp.pagesize);
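	/*
	 * e.g. a pagesize of 0x1000 gives pagemask 0xfff and
	 * (0xfff & 0x1000) == 0; any non-power-of-two size leaves a
	 * common bit set and panics above.
	 */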
    517 	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
    518 		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
    519 			break;
    520 }
    521 
    522 /*
    523  * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
    524  */
    525 
    526 vaddr_t
    527 uvm_pageboot_alloc(vsize_t size)
    528 {
    529 	static bool initialized = false;
    530 	vaddr_t addr;
    531 #if !defined(PMAP_STEAL_MEMORY)
    532 	vaddr_t vaddr;
    533 	paddr_t paddr;
    534 #endif
    535 
    536 	/*
    537 	 * on first call to this function, initialize ourselves.
    538 	 */
    539 	if (initialized == false) {
    540 		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
    541 
    542 		/* round it the way we like it */
    543 		virtual_space_start = round_page(virtual_space_start);
    544 		virtual_space_end = trunc_page(virtual_space_end);
    545 
    546 		initialized = true;
    547 	}
    548 
    549 	/* round to page size */
    550 	size = round_page(size);
    551 
    552 #if defined(PMAP_STEAL_MEMORY)
    553 
    554 	/*
    555 	 * defer bootstrap allocation to MD code (it may want to allocate
    556 	 * from a direct-mapped segment).  pmap_steal_memory should adjust
    557 	 * virtual_space_start/virtual_space_end if necessary.
    558 	 */
    559 
    560 	addr = pmap_steal_memory(size, &virtual_space_start,
    561 	    &virtual_space_end);
    562 
    563 	return(addr);
    564 
    565 #else /* !PMAP_STEAL_MEMORY */
    566 
    567 	/*
    568 	 * allocate virtual memory for this request
    569 	 */
    570 	if (virtual_space_start == virtual_space_end ||
    571 	    (virtual_space_end - virtual_space_start) < size)
    572 		panic("uvm_pageboot_alloc: out of virtual space");
    573 
    574 	addr = virtual_space_start;
    575 
    576 #ifdef PMAP_GROWKERNEL
    577 	/*
    578 	 * If the kernel pmap can't map the requested space,
    579 	 * then allocate more resources for it.
    580 	 */
    581 	if (uvm_maxkaddr < (addr + size)) {
    582 		uvm_maxkaddr = pmap_growkernel(addr + size);
    583 		if (uvm_maxkaddr < (addr + size))
    584 			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
    585 	}
    586 #endif
    587 
    588 	virtual_space_start += size;
    589 
    590 	/*
    591 	 * allocate and mapin physical pages to back new virtual pages
    592 	 */
    593 
    594 	for (vaddr = round_page(addr) ; vaddr < addr + size ;
    595 	    vaddr += PAGE_SIZE) {
    596 
    597 		if (!uvm_page_physget(&paddr))
    598 			panic("uvm_pageboot_alloc: out of memory");
    599 
    600 		/*
    601 		 * Note this memory is no longer managed, so using
    602 		 * pmap_kenter is safe.
    603 		 */
    604 		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE, 0);
    605 	}
    606 	pmap_update(pmap_kernel());
    607 	return(addr);
    608 #endif	/* PMAP_STEAL_MEMORY */
    609 }
    610 
    611 #if !defined(PMAP_STEAL_MEMORY)
    612 /*
    613  * uvm_page_physget: "steal" one page from the vm_physmem structure.
    614  *
    615  * => attempt to allocate it off the end of a segment in which the "avail"
    616  *    values match the start/end values.   if we can't do that, then we
    617  *    will advance both values (making them equal, and removing some
    618  *    vm_page structures from the non-avail area).
    619  * => return false if out of memory.
    620  */
    621 
    622 /* subroutine: try to allocate from memory chunks on the specified freelist */
    623 static bool uvm_page_physget_freelist(paddr_t *, int);
    624 
    625 static bool
    626 uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
    627 {
    628 	struct vm_physseg *seg;
    629 	int lcv, x;
    630 
    631 	/* pass 1: try allocating from a matching end */
    632 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
    633 	for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
    634 #else
    635 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
    636 #endif
    637 	{
    638 		seg = VM_PHYSMEM_PTR(lcv);
    639 
    640 		if (uvm.page_init_done == true)
    641 			panic("uvm_page_physget: called _after_ bootstrap");
    642 
    643 		if (seg->free_list != freelist)
    644 			continue;
    645 
    646 		/* try from front */
    647 		if (seg->avail_start == seg->start &&
    648 		    seg->avail_start < seg->avail_end) {
    649 			*paddrp = ctob(seg->avail_start);
    650 			seg->avail_start++;
    651 			seg->start++;
    652 			/* nothing left?   nuke it */
    653 			if (seg->avail_start == seg->end) {
    654 				if (vm_nphysmem == 1)
    655 				    panic("uvm_page_physget: out of memory!");
    656 				vm_nphysmem--;
    657 				for (x = lcv ; x < vm_nphysmem ; x++)
    658 					/* structure copy */
    659 					VM_PHYSMEM_PTR_SWAP(x, x + 1);
    660 			}
    661 			return (true);
    662 		}
    663 
    664 		/* try from rear */
    665 		if (seg->avail_end == seg->end &&
    666 		    seg->avail_start < seg->avail_end) {
    667 			*paddrp = ctob(seg->avail_end - 1);
    668 			seg->avail_end--;
    669 			seg->end--;
    670 			/* nothing left?   nuke it */
    671 			if (seg->avail_end == seg->start) {
    672 				if (vm_nphysmem == 1)
    673 				    panic("uvm_page_physget: out of memory!");
    674 				vm_nphysmem--;
    675 				for (x = lcv ; x < vm_nphysmem ; x++)
    676 					/* structure copy */
    677 					VM_PHYSMEM_PTR_SWAP(x, x + 1);
    678 			}
    679 			return (true);
    680 		}
    681 	}
    682 
    683 	/* pass2: forget about matching ends, just allocate something */
    684 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
    685 	for (lcv = vm_nphysmem - 1 ; lcv >= 0 ; lcv--)
    686 #else
    687 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
    688 #endif
    689 	{
    690 		seg = VM_PHYSMEM_PTR(lcv);
    691 
    692 		/* any room in this bank? */
    693 		if (seg->avail_start >= seg->avail_end)
    694 			continue;  /* nope */
    695 
    696 		*paddrp = ctob(seg->avail_start);
    697 		seg->avail_start++;
    698 		/* truncate! */
    699 		seg->start = seg->avail_start;
    700 
    701 		/* nothing left?   nuke it */
    702 		if (seg->avail_start == seg->end) {
    703 			if (vm_nphysmem == 1)
    704 				panic("uvm_page_physget: out of memory!");
    705 			vm_nphysmem--;
    706 			for (x = lcv ; x < vm_nphysmem ; x++)
    707 				/* structure copy */
    708 				VM_PHYSMEM_PTR_SWAP(x, x + 1);
    709 		}
    710 		return (true);
    711 	}
    712 
    713 	return (false);        /* whoops! */
    714 }
    715 
    716 bool
    717 uvm_page_physget(paddr_t *paddrp)
    718 {
    719 	int i;
    720 
    721 	/* try in the order of freelist preference */
    722 	for (i = 0; i < VM_NFREELIST; i++)
    723 		if (uvm_page_physget_freelist(paddrp, i) == true)
    724 			return (true);
    725 	return (false);
    726 }
    727 #endif /* PMAP_STEAL_MEMORY */
    728 
    729 /*
    730  * uvm_page_physload: load physical memory into VM system
    731  *
    732  * => all args are PFs
    733  * => all pages in start/end get vm_page structures
    734  * => areas marked by avail_start/avail_end get added to the free page pool
    735  * => we are limited to VM_PHYSSEG_MAX physical memory segments
    736  */
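
/*
 * A typical (hypothetical) call from MD bootstrap code, with all of the
 * arguments given as page frame numbers:
 *
 *	uvm_page_physload(atop(seg_start), atop(seg_end),
 *	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
 */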
    737 
    738 uvm_physseg_t
    739 uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    740     paddr_t avail_end, int free_list)
    741 {
    742 	int preload, lcv;
    743 	psize_t npages;
    744 	struct vm_page *pgs;
    745 	struct vm_physseg *ps;
    746 
    747 	if (uvmexp.pagesize == 0)
    748 		panic("uvm_page_physload: page size not set!");
    749 	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
    750 		panic("uvm_page_physload: bad free list %d", free_list);
    751 	if (start >= end)
    752 		panic("uvm_page_physload: start >= end");
    753 
    754 	/*
    755 	 * do we have room?
    756 	 */
    757 
    758 	if (vm_nphysmem == VM_PHYSSEG_MAX) {
    759 		printf("uvm_page_physload: unable to load physical memory "
    760 		    "segment\n");
    761 		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
    762 		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
    763 		printf("\tincrease VM_PHYSSEG_MAX\n");
    764 		return 0;
    765 	}
    766 
    767 	/*
    768 	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
    769 	 * called yet, so kmem is not available).
    770 	 */
    771 
    772 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
    773 		if (VM_PHYSMEM_PTR(lcv)->pgs)
    774 			break;
    775 	}
    776 	preload = (lcv == vm_nphysmem);
    777 
    778 	/*
    779 	 * if VM is already running, attempt to kmem_alloc vm_page structures
    780 	 */
    781 
    782 	if (!preload) {
    783 		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
    784 	} else {
    785 		pgs = NULL;
    786 		npages = 0;
    787 	}
    788 
    789 	/*
    790 	 * now insert us in the proper place in vm_physmem[]
    791 	 */
    792 
    793 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
    794 	/* random: put it at the end (easy!) */
    795 	ps = VM_PHYSMEM_PTR(vm_nphysmem);
    796 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
    797 	{
    798 		int x;
    799 		/* sort by address for binary search */
    800 		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
    801 			if (start < VM_PHYSMEM_PTR(lcv)->start)
    802 				break;
    803 		ps = VM_PHYSMEM_PTR(lcv);
    804 		/* move back other entries, if necessary ... */
    805 		for (x = vm_nphysmem ; x > lcv ; x--)
    806 			/* structure copy */
    807 			VM_PHYSMEM_PTR_SWAP(x, x - 1);
    808 	}
    809 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
    810 	{
    811 		int x;
    812 		/* sort by largest segment first */
    813 		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
    814 			if ((end - start) >
    815 			    (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
    816 				break;
    817 		ps = VM_PHYSMEM_PTR(lcv);
    818 		/* move back other entries, if necessary ... */
    819 		for (x = vm_nphysmem ; x > lcv ; x--)
    820 			/* structure copy */
    821 			VM_PHYSMEM_PTR_SWAP(x, x - 1);
    822 	}
    823 #else
    824 	panic("uvm_page_physload: unknown physseg strategy selected!");
    825 #endif
    826 
    827 	ps->start = start;
    828 	ps->end = end;
    829 	ps->avail_start = avail_start;
    830 	ps->avail_end = avail_end;
    831 	if (preload) {
    832 		ps->pgs = NULL;
    833 	} else {
    834 		ps->pgs = pgs;
    835 		ps->lastpg = pgs + npages;
    836 	}
    837 	ps->free_list = free_list;
    838 	vm_nphysmem++;
    839 
    840 	if (!preload) {
    841 		uvmpdpol_reinit();
    842 	}
    843 
    844 	return 0;
    845 }
    846 
    847 /*
    848  * when VM_PHYSSEG_MAX is 1, we can simplify these functions
    849  */
    850 
    851 #if VM_PHYSSEG_MAX == 1
    852 static inline int vm_physseg_find_contig(struct vm_physseg *, int, paddr_t, int *);
    853 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
    854 static inline int vm_physseg_find_bsearch(struct vm_physseg *, int, paddr_t, int *);
    855 #else
    856 static inline int vm_physseg_find_linear(struct vm_physseg *, int, paddr_t, int *);
    857 #endif
    858 
    859 /*
    860  * vm_physseg_find: find vm_physseg structure that belongs to a PA
    861  */
    862 int
    863 vm_physseg_find(paddr_t pframe, int *offp)
    864 {
    865 
    866 #if VM_PHYSSEG_MAX == 1
    867 	return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
    868 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
    869 	return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
    870 #else
    871 	return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
    872 #endif
    873 }
    874 
    875 #if VM_PHYSSEG_MAX == 1
    876 static inline int
    877 vm_physseg_find_contig(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
    878 {
    879 
    880 	/* 'contig' case */
    881 	if (pframe >= segs[0].start && pframe < segs[0].end) {
    882 		if (offp)
    883 			*offp = pframe - segs[0].start;
    884 		return(0);
    885 	}
    886 	return(-1);
    887 }
    888 
    889 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
    890 
    891 static inline int
    892 vm_physseg_find_bsearch(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
    893 {
    894 	/* binary search for it */
    895 	u_int	start, len, guess;
    896 
    897 	/*
    898 	 * if try is too large (thus target is less than try) we reduce
    899 	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
    900 	 *
    901 	 * if the try is too small (thus target is greater than try) then
    902 	 * we set the new start to be (try + 1).   this means we need to
    903 	 * reduce the length to (round(len/2) - 1).
    904 	 *
    905 	 * note "adjust" below which takes advantage of the fact that
    906 	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
    907 	 * for any value of len we may have
    908 	 */
    909 
    910 	for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
    911 		guess = start + (len / 2);	/* try in the middle */
    912 
    913 		/* start past our try? */
    914 		if (pframe >= segs[guess].start) {
    915 			/* was try correct? */
    916 			if (pframe < segs[guess].end) {
    917 				if (offp)
    918 					*offp = pframe - segs[guess].start;
    919 				return guess;            /* got it */
    920 			}
    921 			start = guess + 1;	/* next time, start here */
    922 			len--;			/* "adjust" */
    923 		} else {
    924 			/*
    925 			 * pframe before try, just reduce length of
    926 			 * region, done in "for" loop
    927 			 */
    928 		}
    929 	}
    930 	return(-1);
    931 }
    932 
    933 #else
    934 
    935 static inline int
    936 vm_physseg_find_linear(struct vm_physseg *segs, int nsegs, paddr_t pframe, int *offp)
    937 {
    938 	/* linear search for it */
    939 	int	lcv;
    940 
    941 	for (lcv = 0; lcv < nsegs; lcv++) {
    942 		if (pframe >= segs[lcv].start &&
    943 		    pframe < segs[lcv].end) {
    944 			if (offp)
    945 				*offp = pframe - segs[lcv].start;
    946 			return(lcv);		   /* got it */
    947 		}
    948 	}
    949 	return(-1);
    950 }
    951 #endif
    952 
    953 /*
    954  * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
    955  * back from an I/O mapping (ugh!).   used in some MD code as well.
    956  */
    957 struct vm_page *
    958 uvm_phys_to_vm_page(paddr_t pa)
    959 {
    960 	paddr_t pf = atop(pa);
    961 	int	off;
    962 	int	psi;
    963 
    964 	psi = vm_physseg_find(pf, &off);
    965 	if (psi != -1)
    966 		return(&VM_PHYSMEM_PTR(psi)->pgs[off]);
    967 	return(NULL);
    968 }
    969 
    970 paddr_t
    971 uvm_vm_page_to_phys(const struct vm_page *pg)
    972 {
    973 
    974 	return pg->phys_addr;
    975 }
    976 
    977 /*
    978  * uvm_page_recolor: Recolor the pages if the new bucket count is
    979  * larger than the old one.
    980  */
    981 
    982 void
    983 uvm_page_recolor(int newncolors)
    984 {
    985 	struct pgflbucket *bucketarray, *cpuarray, *oldbucketarray;
    986 	struct pgfreelist gpgfl, pgfl;
    987 	struct vm_page *pg;
    988 	vsize_t bucketcount;
    989 	size_t bucketmemsize, oldbucketmemsize;
    990 	int lcv, color, i, ocolors;
    991 	struct uvm_cpu *ucpu;
    992 
    993 	KASSERT(((newncolors - 1) & newncolors) == 0);
    994 
    995 	if (newncolors <= uvmexp.ncolors)
    996 		return;
    997 
    998 	if (uvm.page_init_done == false) {
    999 		uvmexp.ncolors = newncolors;
   1000 		return;
   1001 	}
   1002 
   1003 	bucketcount = newncolors * VM_NFREELIST;
   1004 	bucketmemsize = bucketcount * sizeof(struct pgflbucket) * 2;
   1005 	bucketarray = kmem_alloc(bucketmemsize, KM_SLEEP);
   1006 	cpuarray = bucketarray + bucketcount;
   1007 	if (bucketarray == NULL) {
   1008 		printf("WARNING: unable to allocate %ld page color buckets\n",
   1009 		    (long) bucketcount);
   1010 		return;
   1011 	}
   1012 
   1013 	mutex_spin_enter(&uvm_fpageqlock);
   1014 
   1015 	/* Make sure we should still do this. */
   1016 	if (newncolors <= uvmexp.ncolors) {
   1017 		mutex_spin_exit(&uvm_fpageqlock);
   1018 		kmem_free(bucketarray, bucketmemsize);
   1019 		return;
   1020 	}
   1021 
   1022 	oldbucketarray = uvm.page_free[0].pgfl_buckets;
   1023 	ocolors = uvmexp.ncolors;
   1024 
   1025 	uvmexp.ncolors = newncolors;
   1026 	uvmexp.colormask = uvmexp.ncolors - 1;
   1027 
   1028 	ucpu = curcpu()->ci_data.cpu_uvm;
   1029 	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
   1030 		gpgfl.pgfl_buckets = (bucketarray + (lcv * newncolors));
   1031 		pgfl.pgfl_buckets = (cpuarray + (lcv * uvmexp.ncolors));
   1032 		uvm_page_init_buckets(&gpgfl);
   1033 		uvm_page_init_buckets(&pgfl);
   1034 		for (color = 0; color < ocolors; color++) {
   1035 			for (i = 0; i < PGFL_NQUEUES; i++) {
   1036 				while ((pg = LIST_FIRST(&uvm.page_free[
   1037 				    lcv].pgfl_buckets[color].pgfl_queues[i]))
   1038 				    != NULL) {
   1039 					LIST_REMOVE(pg, pageq.list); /* global */
   1040 					LIST_REMOVE(pg, listq.list); /* cpu */
   1041 					LIST_INSERT_HEAD(&gpgfl.pgfl_buckets[
   1042 					    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
   1043 					    i], pg, pageq.list);
   1044 					LIST_INSERT_HEAD(&pgfl.pgfl_buckets[
   1045 					    VM_PGCOLOR_BUCKET(pg)].pgfl_queues[
   1046 					    i], pg, listq.list);
   1047 				}
   1048 			}
   1049 		}
   1050 		uvm.page_free[lcv].pgfl_buckets = gpgfl.pgfl_buckets;
   1051 		ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
   1052 	}
   1053 
   1054 	oldbucketmemsize = recolored_pages_memsize;
   1055 
   1056 	recolored_pages_memsize = bucketmemsize;
   1057 	mutex_spin_exit(&uvm_fpageqlock);
   1058 
   1059 	if (oldbucketmemsize) {
    1060 		kmem_free(oldbucketarray, oldbucketmemsize);
   1061 	}
   1062 
   1063 	/*
   1064 	 * this calls uvm_km_alloc() which may want to hold
   1065 	 * uvm_fpageqlock.
   1066 	 */
   1067 	uvm_pager_realloc_emerg();
   1068 }
   1069 
   1070 /*
   1071  * uvm_cpu_attach: initialize per-CPU data structures.
   1072  */
   1073 
   1074 void
   1075 uvm_cpu_attach(struct cpu_info *ci)
   1076 {
   1077 	struct pgflbucket *bucketarray;
   1078 	struct pgfreelist pgfl;
   1079 	struct uvm_cpu *ucpu;
   1080 	vsize_t bucketcount;
   1081 	int lcv;
   1082 
   1083 	if (CPU_IS_PRIMARY(ci)) {
   1084 		/* Already done in uvm_page_init(). */
   1085 		goto attachrnd;
   1086 	}
   1087 
   1088 	/* Add more reserve pages for this CPU. */
   1089 	uvmexp.reserve_kernel += vm_page_reserve_kernel;
   1090 
   1091 	/* Configure this CPU's free lists. */
   1092 	bucketcount = uvmexp.ncolors * VM_NFREELIST;
   1093 	bucketarray = kmem_alloc(bucketcount * sizeof(struct pgflbucket),
   1094 	    KM_SLEEP);
   1095 	ucpu = kmem_zalloc(sizeof(*ucpu), KM_SLEEP);
   1096 	uvm.cpus[cpu_index(ci)] = ucpu;
   1097 	ci->ci_data.cpu_uvm = ucpu;
   1098 	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
   1099 		pgfl.pgfl_buckets = (bucketarray + (lcv * uvmexp.ncolors));
   1100 		uvm_page_init_buckets(&pgfl);
   1101 		ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
   1102 	}
   1103 
   1104 attachrnd:
   1105 	/*
   1106 	 * Attach RNG source for this CPU's VM events
   1107 	 */
   1108         rnd_attach_source(&uvm.cpus[cpu_index(ci)]->rs,
   1109 			  ci->ci_data.cpu_name, RND_TYPE_VM,
   1110 			  RND_FLAG_COLLECT_TIME|RND_FLAG_COLLECT_VALUE|
   1111 			  RND_FLAG_ESTIMATE_VALUE);
   1112 
   1113 }
   1114 
   1115 /*
   1116  * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat
   1117  */
   1118 
   1119 static struct vm_page *
   1120 uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int flist, int try1, int try2,
   1121     int *trycolorp)
   1122 {
   1123 	struct pgflist *freeq;
   1124 	struct vm_page *pg;
   1125 	int color, trycolor = *trycolorp;
   1126 	struct pgfreelist *gpgfl, *pgfl;
   1127 
   1128 	KASSERT(mutex_owned(&uvm_fpageqlock));
   1129 
   1130 	color = trycolor;
   1131 	pgfl = &ucpu->page_free[flist];
   1132 	gpgfl = &uvm.page_free[flist];
   1133 	do {
   1134 		/* cpu, try1 */
   1135 		if ((pg = LIST_FIRST((freeq =
   1136 		    &pgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) {
   1137 			KASSERT(pg->pqflags & PQ_FREE);
   1138 			KASSERT(try1 == PGFL_ZEROS || !(pg->flags & PG_ZERO));
   1139 			KASSERT(try1 == PGFL_UNKNOWN || (pg->flags & PG_ZERO));
   1140 			KASSERT(ucpu == VM_FREE_PAGE_TO_CPU(pg));
   1141 			VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
   1142 		    	uvmexp.cpuhit++;
   1143 			goto gotit;
   1144 		}
   1145 		/* global, try1 */
   1146 		if ((pg = LIST_FIRST((freeq =
   1147 		    &gpgfl->pgfl_buckets[color].pgfl_queues[try1]))) != NULL) {
   1148 			KASSERT(pg->pqflags & PQ_FREE);
   1149 			KASSERT(try1 == PGFL_ZEROS || !(pg->flags & PG_ZERO));
   1150 			KASSERT(try1 == PGFL_UNKNOWN || (pg->flags & PG_ZERO));
   1151 			KASSERT(ucpu != VM_FREE_PAGE_TO_CPU(pg));
   1152 			VM_FREE_PAGE_TO_CPU(pg)->pages[try1]--;
   1153 		    	uvmexp.cpumiss++;
   1154 			goto gotit;
   1155 		}
   1156 		/* cpu, try2 */
   1157 		if ((pg = LIST_FIRST((freeq =
   1158 		    &pgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) {
   1159 			KASSERT(pg->pqflags & PQ_FREE);
   1160 			KASSERT(try2 == PGFL_ZEROS || !(pg->flags & PG_ZERO));
   1161 			KASSERT(try2 == PGFL_UNKNOWN || (pg->flags & PG_ZERO));
   1162 			KASSERT(ucpu == VM_FREE_PAGE_TO_CPU(pg));
   1163 			VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
   1164 		    	uvmexp.cpuhit++;
   1165 			goto gotit;
   1166 		}
   1167 		/* global, try2 */
   1168 		if ((pg = LIST_FIRST((freeq =
   1169 		    &gpgfl->pgfl_buckets[color].pgfl_queues[try2]))) != NULL) {
   1170 			KASSERT(pg->pqflags & PQ_FREE);
   1171 			KASSERT(try2 == PGFL_ZEROS || !(pg->flags & PG_ZERO));
   1172 			KASSERT(try2 == PGFL_UNKNOWN || (pg->flags & PG_ZERO));
   1173 			KASSERT(ucpu != VM_FREE_PAGE_TO_CPU(pg));
   1174 			VM_FREE_PAGE_TO_CPU(pg)->pages[try2]--;
   1175 		    	uvmexp.cpumiss++;
   1176 			goto gotit;
   1177 		}
   1178 		color = (color + 1) & uvmexp.colormask;
   1179 	} while (color != trycolor);
   1180 
   1181 	return (NULL);
   1182 
   1183  gotit:
   1184 	LIST_REMOVE(pg, pageq.list);	/* global list */
   1185 	LIST_REMOVE(pg, listq.list);	/* per-cpu list */
   1186 	uvmexp.free--;
   1187 
   1188 	/* update zero'd page count */
   1189 	if (pg->flags & PG_ZERO)
   1190 		uvmexp.zeropages--;
   1191 
   1192 	if (color == trycolor)
   1193 		uvmexp.colorhit++;
   1194 	else {
   1195 		uvmexp.colormiss++;
   1196 		*trycolorp = color;
   1197 	}
   1198 
   1199 	return (pg);
   1200 }
   1201 
   1202 /*
   1203  * uvm_pagealloc_strat: allocate vm_page from a particular free list.
   1204  *
   1205  * => return null if no pages free
   1206  * => wake up pagedaemon if number of free pages drops below low water mark
   1207  * => if obj != NULL, obj must be locked (to put in obj's tree)
   1208  * => if anon != NULL, anon must be locked (to put in anon)
   1209  * => only one of obj or anon can be non-null
   1210  * => caller must activate/deactivate page if it is not wired.
   1211  * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
   1212  * => policy decision: it is more important to pull a page off of the
   1213  *	appropriate priority free list than it is to get a zero'd or
   1214  *	unknown contents page.  This is because we live with the
   1215  *	consequences of a bad free list decision for the entire
   1216  *	lifetime of the page, e.g. if the page comes from memory that
   1217  *	is slower to access.
   1218  */
   1219 
   1220 struct vm_page *
   1221 uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
   1222     int flags, int strat, int free_list)
   1223 {
   1224 	int lcv, try1, try2, zeroit = 0, color;
   1225 	struct uvm_cpu *ucpu;
   1226 	struct vm_page *pg;
   1227 	lwp_t *l;
   1228 
   1229 	KASSERT(obj == NULL || anon == NULL);
   1230 	KASSERT(anon == NULL || (flags & UVM_FLAG_COLORMATCH) || off == 0);
   1231 	KASSERT(off == trunc_page(off));
   1232 	KASSERT(obj == NULL || mutex_owned(obj->vmobjlock));
   1233 	KASSERT(anon == NULL || anon->an_lock == NULL ||
   1234 	    mutex_owned(anon->an_lock));
   1235 
   1236 	mutex_spin_enter(&uvm_fpageqlock);
   1237 
   1238 	/*
   1239 	 * This implements a global round-robin page coloring
   1240 	 * algorithm.
   1241 	 */
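	/*
	 * e.g. with 4 colors a CPU hands out colors 0, 1, 2, 3, 0, ... on
	 * successive allocations, unless UVM_FLAG_COLORMATCH forces the
	 * color implied by "off".
	 */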
   1242 
   1243 	ucpu = curcpu()->ci_data.cpu_uvm;
   1244 	if (flags & UVM_FLAG_COLORMATCH) {
   1245 		color = atop(off) & uvmexp.colormask;
   1246 	} else {
   1247 		color = ucpu->page_free_nextcolor;
   1248 	}
   1249 
   1250 	/*
    1251 	 * check to see if we need to generate some free pages by waking
    1252 	 * the pagedaemon.
   1253 	 */
   1254 
   1255 	uvm_kick_pdaemon();
   1256 
   1257 	/*
   1258 	 * fail if any of these conditions is true:
   1259 	 * [1]  there really are no free pages, or
   1260 	 * [2]  only kernel "reserved" pages remain and
   1261 	 *        reserved pages have not been requested.
   1262 	 * [3]  only pagedaemon "reserved" pages remain and
   1263 	 *        the requestor isn't the pagedaemon.
   1264 	 * we make kernel reserve pages available if called by a
   1265 	 * kernel thread or a realtime thread.
   1266 	 */
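	/*
	 * (With the defaults in this file, reserve_pagedaemon is 1 page and
	 * reserve_kernel is 5 pages per CPU, e.g. 20 pages on a
	 * hypothetical 4-CPU machine; see uvm_cpu_attach().)
	 */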
   1267 	l = curlwp;
   1268 	if (__predict_true(l != NULL) && lwp_eprio(l) >= PRI_KTHREAD) {
   1269 		flags |= UVM_PGA_USERESERVE;
   1270 	}
   1271 	if ((uvmexp.free <= uvmexp.reserve_kernel &&
   1272 	    (flags & UVM_PGA_USERESERVE) == 0) ||
   1273 	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
   1274 	     curlwp != uvm.pagedaemon_lwp))
   1275 		goto fail;
   1276 
   1277 #if PGFL_NQUEUES != 2
   1278 #error uvm_pagealloc_strat needs to be updated
   1279 #endif
   1280 
   1281 	/*
   1282 	 * If we want a zero'd page, try the ZEROS queue first, otherwise
   1283 	 * we try the UNKNOWN queue first.
   1284 	 */
   1285 	if (flags & UVM_PGA_ZERO) {
   1286 		try1 = PGFL_ZEROS;
   1287 		try2 = PGFL_UNKNOWN;
   1288 	} else {
   1289 		try1 = PGFL_UNKNOWN;
   1290 		try2 = PGFL_ZEROS;
   1291 	}
   1292 
   1293  again:
   1294 	switch (strat) {
   1295 	case UVM_PGA_STRAT_NORMAL:
   1296 		/* Check freelists: descending priority (ascending id) order */
   1297 		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
   1298 			pg = uvm_pagealloc_pgfl(ucpu, lcv,
   1299 			    try1, try2, &color);
   1300 			if (pg != NULL)
   1301 				goto gotit;
   1302 		}
   1303 
   1304 		/* No pages free! */
   1305 		goto fail;
   1306 
   1307 	case UVM_PGA_STRAT_ONLY:
   1308 	case UVM_PGA_STRAT_FALLBACK:
   1309 		/* Attempt to allocate from the specified free list. */
   1310 		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
   1311 		pg = uvm_pagealloc_pgfl(ucpu, free_list,
   1312 		    try1, try2, &color);
   1313 		if (pg != NULL)
   1314 			goto gotit;
   1315 
   1316 		/* Fall back, if possible. */
   1317 		if (strat == UVM_PGA_STRAT_FALLBACK) {
   1318 			strat = UVM_PGA_STRAT_NORMAL;
   1319 			goto again;
   1320 		}
   1321 
   1322 		/* No pages free! */
   1323 		goto fail;
   1324 
   1325 	default:
   1326 		panic("uvm_pagealloc_strat: bad strat %d", strat);
   1327 		/* NOTREACHED */
   1328 	}
   1329 
   1330  gotit:
   1331 	/*
   1332 	 * We now know which color we actually allocated from; set
   1333 	 * the next color accordingly.
   1334 	 */
   1335 
   1336 	ucpu->page_free_nextcolor = (color + 1) & uvmexp.colormask;
   1337 
   1338 	/*
   1339 	 * update allocation statistics and remember if we have to
   1340 	 * zero the page
   1341 	 */
   1342 
   1343 	if (flags & UVM_PGA_ZERO) {
   1344 		if (pg->flags & PG_ZERO) {
   1345 			uvmexp.pga_zerohit++;
   1346 			zeroit = 0;
   1347 		} else {
   1348 			uvmexp.pga_zeromiss++;
   1349 			zeroit = 1;
   1350 		}
   1351 		if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
   1352 			ucpu->page_idle_zero = vm_page_zero_enable;
   1353 		}
   1354 	}
   1355 	KASSERT(pg->pqflags == PQ_FREE);
   1356 
   1357 	pg->offset = off;
   1358 	pg->uobject = obj;
   1359 	pg->uanon = anon;
   1360 	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
   1361 	if (anon) {
   1362 		anon->an_page = pg;
   1363 		pg->pqflags = PQ_ANON;
   1364 		atomic_inc_uint(&uvmexp.anonpages);
   1365 	} else {
   1366 		if (obj) {
   1367 			uvm_pageinsert(obj, pg);
   1368 		}
   1369 		pg->pqflags = 0;
   1370 	}
   1371 	mutex_spin_exit(&uvm_fpageqlock);
   1372 
   1373 #if defined(UVM_PAGE_TRKOWN)
   1374 	pg->owner_tag = NULL;
   1375 #endif
   1376 	UVM_PAGE_OWN(pg, "new alloc");
   1377 
   1378 	if (flags & UVM_PGA_ZERO) {
   1379 		/*
   1380 		 * A zero'd page is not clean.  If we got a page not already
   1381 		 * zero'd, then we have to zero it ourselves.
   1382 		 */
   1383 		pg->flags &= ~PG_CLEAN;
   1384 		if (zeroit)
   1385 			pmap_zero_page(VM_PAGE_TO_PHYS(pg));
   1386 	}
   1387 
   1388 	return(pg);
   1389 
   1390  fail:
   1391 	mutex_spin_exit(&uvm_fpageqlock);
   1392 	return (NULL);
   1393 }
   1394 
   1395 /*
   1396  * uvm_pagereplace: replace a page with another
   1397  *
   1398  * => object must be locked
   1399  */
   1400 
   1401 void
   1402 uvm_pagereplace(struct vm_page *oldpg, struct vm_page *newpg)
   1403 {
   1404 	struct uvm_object *uobj = oldpg->uobject;
   1405 
   1406 	KASSERT((oldpg->flags & PG_TABLED) != 0);
   1407 	KASSERT(uobj != NULL);
   1408 	KASSERT((newpg->flags & PG_TABLED) == 0);
   1409 	KASSERT(newpg->uobject == NULL);
   1410 	KASSERT(mutex_owned(uobj->vmobjlock));
   1411 
   1412 	newpg->uobject = uobj;
   1413 	newpg->offset = oldpg->offset;
   1414 
   1415 	uvm_pageremove_tree(uobj, oldpg);
   1416 	uvm_pageinsert_tree(uobj, newpg);
   1417 	uvm_pageinsert_list(uobj, newpg, oldpg);
   1418 	uvm_pageremove_list(uobj, oldpg);
   1419 }
   1420 
   1421 /*
   1422  * uvm_pagerealloc: reallocate a page from one object to another
   1423  *
   1424  * => both objects must be locked
   1425  */
   1426 
   1427 void
   1428 uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
   1429 {
   1430 	/*
   1431 	 * remove it from the old object
   1432 	 */
   1433 
   1434 	if (pg->uobject) {
   1435 		uvm_pageremove(pg->uobject, pg);
   1436 	}
   1437 
   1438 	/*
   1439 	 * put it in the new object
   1440 	 */
   1441 
   1442 	if (newobj) {
   1443 		pg->uobject = newobj;
   1444 		pg->offset = newoff;
   1445 		uvm_pageinsert(newobj, pg);
   1446 	}
   1447 }
   1448 
   1449 #ifdef DEBUG
   1450 /*
   1451  * check if page is zero-filled
   1452  *
   1453  *  - called with free page queue lock held.
   1454  */
   1455 void
   1456 uvm_pagezerocheck(struct vm_page *pg)
   1457 {
   1458 	int *p, *ep;
   1459 
   1460 	KASSERT(uvm_zerocheckkva != 0);
   1461 	KASSERT(mutex_owned(&uvm_fpageqlock));
   1462 
   1463 	/*
   1464 	 * XXX assuming pmap_kenter_pa and pmap_kremove never call
   1465 	 * uvm page allocator.
   1466 	 *
   1467 	 * it might be better to have "CPU-local temporary map" pmap interface.
   1468 	 */
   1469 	pmap_kenter_pa(uvm_zerocheckkva, VM_PAGE_TO_PHYS(pg), VM_PROT_READ, 0);
   1470 	p = (int *)uvm_zerocheckkva;
   1471 	ep = (int *)((char *)p + PAGE_SIZE);
   1472 	pmap_update(pmap_kernel());
   1473 	while (p < ep) {
   1474 		if (*p != 0)
   1475 			panic("PG_ZERO page isn't zero-filled");
   1476 		p++;
   1477 	}
   1478 	pmap_kremove(uvm_zerocheckkva, PAGE_SIZE);
   1479 	/*
   1480 	 * pmap_update() is not necessary here because no one except us
   1481 	 * uses this VA.
   1482 	 */
   1483 }
   1484 #endif /* DEBUG */
   1485 
   1486 /*
   1487  * uvm_pagefree: free page
   1488  *
   1489  * => erase page's identity (i.e. remove from object)
   1490  * => put page on free list
   1491  * => caller must lock owning object (either anon or uvm_object)
   1492  * => caller must lock page queues
   1493  * => assumes all valid mappings of pg are gone
   1494  */
   1495 
   1496 void
   1497 uvm_pagefree(struct vm_page *pg)
   1498 {
   1499 	struct pgflist *pgfl;
   1500 	struct uvm_cpu *ucpu;
   1501 	int index, color, queue;
   1502 	bool iszero;
   1503 
   1504 #ifdef DEBUG
   1505 	if (pg->uobject == (void *)0xdeadbeef &&
   1506 	    pg->uanon == (void *)0xdeadbeef) {
   1507 		panic("uvm_pagefree: freeing free page %p", pg);
   1508 	}
   1509 #endif /* DEBUG */
   1510 
   1511 	KASSERT((pg->flags & PG_PAGEOUT) == 0);
   1512 	KASSERT(!(pg->pqflags & PQ_FREE));
   1513 	//KASSERT(mutex_owned(&uvm_pageqlock) || !uvmpdpol_pageisqueued_p(pg));
   1514 	KASSERT(pg->uobject == NULL || mutex_owned(pg->uobject->vmobjlock));
   1515 	KASSERT(pg->uobject != NULL || pg->uanon == NULL ||
   1516 		mutex_owned(pg->uanon->an_lock));
   1517 
   1518 	/*
   1519 	 * if the page is loaned, resolve the loan instead of freeing.
   1520 	 */
   1521 
   1522 	if (pg->loan_count) {
   1523 		KASSERT(pg->wire_count == 0);
   1524 
   1525 		/*
   1526 		 * if the page is owned by an anon then we just want to
   1527 		 * drop anon ownership.  the kernel will free the page when
   1528 		 * it is done with it.  if the page is owned by an object,
   1529 		 * remove it from the object and mark it dirty for the benefit
   1530 		 * of possible anon owners.
   1531 		 *
   1532 		 * regardless of previous ownership, wakeup any waiters,
   1533 		 * unbusy the page, and we're done.
   1534 		 */
   1535 
   1536 		if (pg->uobject != NULL) {
   1537 			uvm_pageremove(pg->uobject, pg);
   1538 			pg->flags &= ~PG_CLEAN;
   1539 		} else if (pg->uanon != NULL) {
   1540 			if ((pg->pqflags & PQ_ANON) == 0) {
   1541 				pg->loan_count--;
   1542 			} else {
   1543 				pg->pqflags &= ~PQ_ANON;
   1544 				atomic_dec_uint(&uvmexp.anonpages);
   1545 			}
   1546 			pg->uanon->an_page = NULL;
   1547 			pg->uanon = NULL;
   1548 		}
   1549 		if (pg->flags & PG_WANTED) {
   1550 			wakeup(pg);
   1551 		}
   1552 		pg->flags &= ~(PG_WANTED|PG_BUSY|PG_RELEASED|PG_PAGER1);
   1553 #ifdef UVM_PAGE_TRKOWN
   1554 		pg->owner_tag = NULL;
   1555 #endif
   1556 		if (pg->loan_count) {
   1557 			KASSERT(pg->uobject == NULL);
   1558 			if (pg->uanon == NULL) {
   1559 				KASSERT(mutex_owned(&uvm_pageqlock));
   1560 				uvm_pagedequeue(pg);
   1561 			}
   1562 			return;
   1563 		}
   1564 	}
   1565 
   1566 	/*
   1567 	 * remove page from its object or anon.
   1568 	 */
   1569 
   1570 	if (pg->uobject != NULL) {
   1571 		uvm_pageremove(pg->uobject, pg);
   1572 	} else if (pg->uanon != NULL) {
   1573 		pg->uanon->an_page = NULL;
   1574 		atomic_dec_uint(&uvmexp.anonpages);
   1575 	}
   1576 
   1577 	/*
   1578 	 * now remove the page from the queues.
   1579 	 */
   1580 	if (uvmpdpol_pageisqueued_p(pg)) {
   1581 		KASSERT(mutex_owned(&uvm_pageqlock));
   1582 		uvm_pagedequeue(pg);
   1583 	}
   1584 
   1585 	/*
   1586 	 * if the page was wired, unwire it now.
   1587 	 */
   1588 
   1589 	if (pg->wire_count) {
   1590 		pg->wire_count = 0;
   1591 		uvmexp.wired--;
   1592 	}
   1593 
   1594 	/*
   1595 	 * and put on free queue
   1596 	 */
   1597 
   1598 	iszero = (pg->flags & PG_ZERO);
   1599 	index = uvm_page_lookup_freelist(pg);
   1600 	color = VM_PGCOLOR_BUCKET(pg);
   1601 	queue = (iszero ? PGFL_ZEROS : PGFL_UNKNOWN);
   1602 
   1603 #ifdef DEBUG
   1604 	pg->uobject = (void *)0xdeadbeef;
   1605 	pg->uanon = (void *)0xdeadbeef;
   1606 #endif
   1607 
   1608 	mutex_spin_enter(&uvm_fpageqlock);
   1609 	pg->pqflags = PQ_FREE;
   1610 
   1611 #ifdef DEBUG
   1612 	if (iszero)
   1613 		uvm_pagezerocheck(pg);
   1614 #endif /* DEBUG */
   1615 
   1616 
   1617 	/* global list */
   1618 	pgfl = &uvm.page_free[index].pgfl_buckets[color].pgfl_queues[queue];
   1619 	LIST_INSERT_HEAD(pgfl, pg, pageq.list);
   1620 	uvmexp.free++;
   1621 	if (iszero) {
   1622 		uvmexp.zeropages++;
   1623 	}
   1624 
   1625 	/* per-cpu list */
   1626 	ucpu = curcpu()->ci_data.cpu_uvm;
   1627 	pg->offset = (uintptr_t)ucpu;
   1628 	pgfl = &ucpu->page_free[index].pgfl_buckets[color].pgfl_queues[queue];
   1629 	LIST_INSERT_HEAD(pgfl, pg, listq.list);
   1630 	ucpu->pages[queue]++;
   1631 	if (ucpu->pages[PGFL_ZEROS] < ucpu->pages[PGFL_UNKNOWN]) {
   1632 		ucpu->page_idle_zero = vm_page_zero_enable;
   1633 	}
   1634 
   1635 	mutex_spin_exit(&uvm_fpageqlock);
   1636 }
   1637 
   1638 /*
   1639  * uvm_page_unbusy: unbusy an array of pages.
   1640  *
   1641  * => pages must either all belong to the same object, or all belong to anons.
   1642  * => if pages are object-owned, object must be locked.
   1643  * => if pages are anon-owned, anons must be locked.
   1644  * => caller must lock page queues if pages may be released.
   1645  * => caller must make sure that anon-owned pages are not PG_RELEASED.
   1646  */
   1647 
   1648 void
   1649 uvm_page_unbusy(struct vm_page **pgs, int npgs)
   1650 {
   1651 	struct vm_page *pg;
   1652 	int i;
   1653 	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);
   1654 
   1655 	for (i = 0; i < npgs; i++) {
   1656 		pg = pgs[i];
   1657 		if (pg == NULL || pg == PGO_DONTCARE) {
   1658 			continue;
   1659 		}
   1660 
   1661 		KASSERT(uvm_page_locked_p(pg));
   1662 		KASSERT(pg->flags & PG_BUSY);
   1663 		KASSERT((pg->flags & PG_PAGEOUT) == 0);
   1664 		if (pg->flags & PG_WANTED) {
   1665 			wakeup(pg);
   1666 		}
   1667 		if (pg->flags & PG_RELEASED) {
   1668 			UVMHIST_LOG(ubchist, "releasing pg %p", pg,0,0,0);
   1669 			KASSERT(pg->uobject != NULL ||
   1670 			    (pg->uanon != NULL && pg->uanon->an_ref > 0));
   1671 			pg->flags &= ~PG_RELEASED;
   1672 			uvm_pagefree(pg);
   1673 		} else {
   1674 			UVMHIST_LOG(ubchist, "unbusying pg %p", pg,0,0,0);
   1675 			KASSERT((pg->flags & PG_FAKE) == 0);
   1676 			pg->flags &= ~(PG_WANTED|PG_BUSY);
   1677 			UVM_PAGE_OWN(pg, NULL);
   1678 		}
   1679 	}
   1680 }
   1681 
   1682 #if defined(UVM_PAGE_TRKOWN)
   1683 /*
   1684  * uvm_page_own: set or release page ownership
   1685  *
   1686  * => this is a debugging function that keeps track of who sets PG_BUSY
   1687  *	and where they do it.   it can be used to track down problems
    1688  *	such as a process setting "PG_BUSY" and never releasing it.
   1689  * => page's object [if any] must be locked
   1690  * => if "tag" is NULL then we are releasing page ownership
   1691  */
   1692 void
   1693 uvm_page_own(struct vm_page *pg, const char *tag)
   1694 {
   1695 
   1696 	KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
   1697 	KASSERT((pg->flags & PG_WANTED) == 0);
   1698 	KASSERT(uvm_page_locked_p(pg));
   1699 
   1700 	/* gain ownership? */
   1701 	if (tag) {
   1702 		KASSERT((pg->flags & PG_BUSY) != 0);
   1703 		if (pg->owner_tag) {
   1704 			printf("uvm_page_own: page %p already owned "
   1705 			    "by proc %d [%s]\n", pg,
   1706 			    pg->owner, pg->owner_tag);
   1707 			panic("uvm_page_own");
   1708 		}
   1709 		pg->owner = curproc->p_pid;
   1710 		pg->lowner = curlwp->l_lid;
   1711 		pg->owner_tag = tag;
   1712 		return;
   1713 	}
   1714 
   1715 	/* drop ownership */
   1716 	KASSERT((pg->flags & PG_BUSY) == 0);
   1717 	if (pg->owner_tag == NULL) {
    1718 		printf("uvm_page_own: dropping ownership of a non-owned "
   1719 		    "page (%p)\n", pg);
   1720 		panic("uvm_page_own");
   1721 	}
   1722 	if (!uvmpdpol_pageisqueued_p(pg)) {
   1723 		KASSERT((pg->uanon == NULL && pg->uobject == NULL) ||
   1724 		    pg->wire_count > 0);
   1725 	} else {
   1726 		KASSERT(pg->wire_count == 0);
   1727 	}
   1728 	pg->owner_tag = NULL;
   1729 }
   1730 #endif
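
         /*
          * Illustrative sketch (an assumption, not from this file): callers
          * normally reach uvm_page_own() through the UVM_PAGE_OWN() macro,
          * which compiles to nothing when UVM_PAGE_TRKOWN is not defined.
          * to gain ownership once PG_BUSY has been set:
          *
          *	pg->flags |= PG_BUSY;
          *	UVM_PAGE_OWN(pg, "example_function");
          *
          * and to drop it again once PG_BUSY has been cleared:
          *
          *	pg->flags &= ~PG_BUSY;
          *	UVM_PAGE_OWN(pg, NULL);
          */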
   1731 
   1732 /*
   1733  * uvm_pageidlezero: zero free pages while the system is idle.
   1734  *
   1735  * => try to complete one color bucket at a time, to reduce our impact
   1736  *	on the CPU cache.
    1737  * => we loop until we either reach the target, an lwp becomes ready
    1738  *	to run, or MD code detects a reason to break early.
   1739  */
   1740 void
   1741 uvm_pageidlezero(void)
   1742 {
   1743 	struct vm_page *pg;
   1744 	struct pgfreelist *pgfl, *gpgfl;
   1745 	struct uvm_cpu *ucpu;
   1746 	int free_list, firstbucket, nextbucket;
   1747 	bool lcont = false;
   1748 
   1749 	ucpu = curcpu()->ci_data.cpu_uvm;
   1750 	if (!ucpu->page_idle_zero ||
   1751 	    ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) {
    1752 		ucpu->page_idle_zero = false;
   1753 		return;
   1754 	}
   1755 	if (!mutex_tryenter(&uvm_fpageqlock)) {
    1756 		/* Contention: let other CPUs use the lock. */
   1757 		return;
   1758 	}
   1759 	firstbucket = ucpu->page_free_nextcolor;
   1760 	nextbucket = firstbucket;
   1761 	do {
   1762 		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
   1763 			if (sched_curcpu_runnable_p()) {
   1764 				goto quit;
   1765 			}
   1766 			pgfl = &ucpu->page_free[free_list];
   1767 			gpgfl = &uvm.page_free[free_list];
   1768 			while ((pg = LIST_FIRST(&pgfl->pgfl_buckets[
   1769 			    nextbucket].pgfl_queues[PGFL_UNKNOWN])) != NULL) {
   1770 				if (lcont || sched_curcpu_runnable_p()) {
   1771 					goto quit;
   1772 				}
   1773 				LIST_REMOVE(pg, pageq.list); /* global list */
   1774 				LIST_REMOVE(pg, listq.list); /* per-cpu list */
   1775 				ucpu->pages[PGFL_UNKNOWN]--;
   1776 				uvmexp.free--;
   1777 				KASSERT(pg->pqflags == PQ_FREE);
   1778 				pg->pqflags = 0;
   1779 				mutex_spin_exit(&uvm_fpageqlock);
   1780 #ifdef PMAP_PAGEIDLEZERO
   1781 				if (!PMAP_PAGEIDLEZERO(VM_PAGE_TO_PHYS(pg))) {
   1782 
   1783 					/*
   1784 					 * The machine-dependent code detected
   1785 					 * some reason for us to abort zeroing
   1786 					 * pages, probably because there is a
   1787 					 * process now ready to run.
   1788 					 */
   1789 
   1790 					mutex_spin_enter(&uvm_fpageqlock);
   1791 					pg->pqflags = PQ_FREE;
   1792 					LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[
   1793 					    nextbucket].pgfl_queues[
   1794 					    PGFL_UNKNOWN], pg, pageq.list);
   1795 					LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
   1796 					    nextbucket].pgfl_queues[
   1797 					    PGFL_UNKNOWN], pg, listq.list);
   1798 					ucpu->pages[PGFL_UNKNOWN]++;
   1799 					uvmexp.free++;
   1800 					uvmexp.zeroaborts++;
   1801 					goto quit;
   1802 				}
   1803 #else
   1804 				pmap_zero_page(VM_PAGE_TO_PHYS(pg));
   1805 #endif /* PMAP_PAGEIDLEZERO */
   1806 				pg->flags |= PG_ZERO;
   1807 
   1808 				if (!mutex_tryenter(&uvm_fpageqlock)) {
   1809 					lcont = true;
   1810 					mutex_spin_enter(&uvm_fpageqlock);
   1811 				} else {
   1812 					lcont = false;
   1813 				}
   1814 				pg->pqflags = PQ_FREE;
   1815 				LIST_INSERT_HEAD(&gpgfl->pgfl_buckets[
   1816 				    nextbucket].pgfl_queues[PGFL_ZEROS],
   1817 				    pg, pageq.list);
   1818 				LIST_INSERT_HEAD(&pgfl->pgfl_buckets[
   1819 				    nextbucket].pgfl_queues[PGFL_ZEROS],
   1820 				    pg, listq.list);
   1821 				ucpu->pages[PGFL_ZEROS]++;
   1822 				uvmexp.free++;
   1823 				uvmexp.zeropages++;
   1824 			}
   1825 		}
   1826 		if (ucpu->pages[PGFL_UNKNOWN] < uvmexp.ncolors) {
   1827 			break;
   1828 		}
   1829 		nextbucket = (nextbucket + 1) & uvmexp.colormask;
   1830 	} while (nextbucket != firstbucket);
   1831 	ucpu->page_idle_zero = false;
   1832  quit:
   1833 	mutex_spin_exit(&uvm_fpageqlock);
   1834 }
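
         /*
          * Illustrative sketch (an assumption, not taken from any particular
          * port): a machine-dependent pmap can provide PMAP_PAGEIDLEZERO(pa),
          * which zeroes the page itself and returns false to make
          * uvm_pageidlezero() stop early, e.g. when another lwp has become
          * runnable.  md_map_phys() below is a purely hypothetical helper
          * standing in for however the port maps the physical page; ports
          * without the hook simply fall back to pmap_zero_page() as above.
          *
          *	bool
          *	pmap_pageidlezero(paddr_t pa)
          *	{
          *		uint32_t *va = (uint32_t *)md_map_phys(pa);
          *		u_int i;
          *
          *		for (i = 0; i < PAGE_SIZE / sizeof(*va); i++) {
          *			if ((i & 63) == 0 && sched_curcpu_runnable_p())
          *				return false;
          *			va[i] = 0;
          *		}
          *		return true;
          *	}
          */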
   1835 
   1836 /*
   1837  * uvm_pagelookup: look up a page
   1838  *
   1839  * => caller should lock object to keep someone from pulling the page
   1840  *	out from under it
   1841  */
   1842 
   1843 struct vm_page *
   1844 uvm_pagelookup(struct uvm_object *obj, voff_t off)
   1845 {
   1846 	struct vm_page *pg;
   1847 
   1848 	KASSERT(mutex_owned(obj->vmobjlock));
   1849 
   1850 	pg = rb_tree_find_node(&obj->rb_tree, &off);
   1851 
   1852 	KASSERT(pg == NULL || obj->uo_npages != 0);
   1853 	KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1854 		(pg->flags & PG_BUSY) != 0);
   1855 	return pg;
   1856 }
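
         /*
          * Illustrative sketch (not from this file): looking up a page at a
          * page-aligned offset and busying it, with the owning object locked
          * around both steps.  "uobj" and "off" are assumed example names,
          * and a real caller would normally wait for a busy page rather than
          * skip it.
          *
          *	struct vm_page *pg;
          *
          *	mutex_enter(uobj->vmobjlock);
          *	pg = uvm_pagelookup(uobj, off);
          *	if (pg != NULL && (pg->flags & PG_BUSY) == 0) {
          *		pg->flags |= PG_BUSY;
          *		UVM_PAGE_OWN(pg, "example");
          *	}
          *	mutex_exit(uobj->vmobjlock);
          */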
   1857 
   1858 /*
   1859  * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
   1860  *
   1861  * => caller must lock page queues
   1862  */
   1863 
   1864 void
   1865 uvm_pagewire(struct vm_page *pg)
   1866 {
   1867 	KASSERT(mutex_owned(&uvm_pageqlock));
   1868 #if defined(READAHEAD_STATS)
   1869 	if ((pg->pqflags & PQ_READAHEAD) != 0) {
   1870 		uvm_ra_hit.ev_count++;
   1871 		pg->pqflags &= ~PQ_READAHEAD;
   1872 	}
   1873 #endif /* defined(READAHEAD_STATS) */
   1874 	if (pg->wire_count == 0) {
   1875 		uvm_pagedequeue(pg);
   1876 		uvmexp.wired++;
   1877 	}
   1878 	pg->wire_count++;
   1879 }
   1880 
   1881 /*
   1882  * uvm_pageunwire: unwire the page.
   1883  *
   1884  * => activate if wire count goes to zero.
   1885  * => caller must lock page queues
   1886  */
   1887 
   1888 void
   1889 uvm_pageunwire(struct vm_page *pg)
   1890 {
   1891 	KASSERT(mutex_owned(&uvm_pageqlock));
   1892 	pg->wire_count--;
   1893 	if (pg->wire_count == 0) {
   1894 		uvm_pageactivate(pg);
   1895 		uvmexp.wired--;
   1896 	}
   1897 }
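
         /*
          * Illustrative sketch (not from this file): wiring a page for the
          * duration of an operation and unwiring it afterwards, taking the
          * page queue lock around each call as required above.
          *
          *	mutex_enter(&uvm_pageqlock);
          *	uvm_pagewire(pg);
          *	mutex_exit(&uvm_pageqlock);
          *
          *	(... the pagedaemon now leaves the page alone ...)
          *
          *	mutex_enter(&uvm_pageqlock);
          *	uvm_pageunwire(pg);
          *	mutex_exit(&uvm_pageqlock);
          */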
   1898 
   1899 /*
   1900  * uvm_pagedeactivate: deactivate page
   1901  *
   1902  * => caller must lock page queues
   1903  * => caller must check to make sure page is not wired
   1904  * => object that page belongs to must be locked (so we can adjust pg->flags)
   1905  * => caller must clear the reference on the page before calling
   1906  */
   1907 
   1908 void
   1909 uvm_pagedeactivate(struct vm_page *pg)
   1910 {
   1911 
   1912 	KASSERT(mutex_owned(&uvm_pageqlock));
   1913 	KASSERT(uvm_page_locked_p(pg));
   1914 	KASSERT(pg->wire_count != 0 || uvmpdpol_pageisqueued_p(pg));
   1915 	uvmpdpol_pagedeactivate(pg);
   1916 }
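
         /*
          * Illustrative sketch (not from this file): deactivating a page in
          * the way the requirements above describe, clearing the pmap
          * reference first and skipping wired pages.  "uobj" is an assumed
          * example name.
          *
          *	mutex_enter(uobj->vmobjlock);
          *	mutex_enter(&uvm_pageqlock);
          *	if (pg->wire_count == 0) {
          *		pmap_clear_reference(pg);
          *		uvm_pagedeactivate(pg);
          *	}
          *	mutex_exit(&uvm_pageqlock);
          *	mutex_exit(uobj->vmobjlock);
          */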
   1917 
   1918 /*
   1919  * uvm_pageactivate: activate page
   1920  *
   1921  * => caller must lock page queues
   1922  */
   1923 
   1924 void
   1925 uvm_pageactivate(struct vm_page *pg)
   1926 {
   1927 
   1928 	KASSERT(mutex_owned(&uvm_pageqlock));
   1929 	KASSERT(uvm_page_locked_p(pg));
   1930 #if defined(READAHEAD_STATS)
   1931 	if ((pg->pqflags & PQ_READAHEAD) != 0) {
   1932 		uvm_ra_hit.ev_count++;
   1933 		pg->pqflags &= ~PQ_READAHEAD;
   1934 	}
   1935 #endif /* defined(READAHEAD_STATS) */
   1936 	if (pg->wire_count != 0) {
   1937 		return;
   1938 	}
   1939 	uvmpdpol_pageactivate(pg);
   1940 }
   1941 
   1942 /*
   1943  * uvm_pagedequeue: remove a page from any paging queue
   1944  */
   1945 
   1946 void
   1947 uvm_pagedequeue(struct vm_page *pg)
   1948 {
   1949 
   1950 	if (uvmpdpol_pageisqueued_p(pg)) {
   1951 		KASSERT(mutex_owned(&uvm_pageqlock));
   1952 	}
   1953 
   1954 	uvmpdpol_pagedequeue(pg);
   1955 }
   1956 
   1957 /*
   1958  * uvm_pageenqueue: add a page to a paging queue without activating.
   1959  * used where a page is not really demanded (yet).  eg. read-ahead
    1960  * used where a page is not really demanded (yet).  e.g. read-ahead
   1961 
   1962 void
   1963 uvm_pageenqueue(struct vm_page *pg)
   1964 {
   1965 
   1966 	KASSERT(mutex_owned(&uvm_pageqlock));
   1967 	if (pg->wire_count != 0) {
   1968 		return;
   1969 	}
   1970 	uvmpdpol_pageenqueue(pg);
   1971 }
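
         /*
          * Illustrative sketch (not from this file): a read-ahead path might
          * enqueue a page it has just brought in without activating it, so
          * the page stays cheap to reclaim if it is never referenced.
          *
          *	mutex_enter(&uvm_pageqlock);
          *	uvm_pageenqueue(pg);
          *	mutex_exit(&uvm_pageqlock);
          */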
   1972 
   1973 /*
   1974  * uvm_pagezero: zero fill a page
   1975  *
   1976  * => if page is part of an object then the object should be locked
   1977  *	to protect pg->flags.
   1978  */
   1979 
   1980 void
   1981 uvm_pagezero(struct vm_page *pg)
   1982 {
   1983 	pg->flags &= ~PG_CLEAN;
   1984 	pmap_zero_page(VM_PAGE_TO_PHYS(pg));
   1985 }
   1986 
   1987 /*
   1988  * uvm_pagecopy: copy a page
   1989  *
   1990  * => if page is part of an object then the object should be locked
   1991  *	to protect pg->flags.
   1992  */
   1993 
   1994 void
   1995 uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
   1996 {
   1997 
   1998 	dst->flags &= ~PG_CLEAN;
   1999 	pmap_copy_page(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
   2000 }
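
         /*
          * Illustrative sketch (not from this file): a copy-on-write style
          * path might allocate a fresh page for an anon and copy into it.
          * "nanon" and "opg" are assumed example names.
          *
          *	struct vm_page *npg;
          *
          *	npg = uvm_pagealloc(NULL, 0, nanon, 0);
          *	if (npg != NULL)
          *		uvm_pagecopy(opg, npg);
          *
          * both uvm_pagecopy() and uvm_pagezero() clear PG_CLEAN on the
          * destination themselves, so the caller does not mark it dirty.
          */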
   2001 
   2002 /*
    2003  * uvm_pageismanaged: test whether a page (specified by PA) is managed.
   2004  */
   2005 
   2006 bool
   2007 uvm_pageismanaged(paddr_t pa)
   2008 {
   2009 
   2010 	return (vm_physseg_find(atop(pa), NULL) != -1);
   2011 }
   2012 
   2013 /*
   2014  * uvm_page_lookup_freelist: look up the free list for the specified page
   2015  */
   2016 
   2017 int
   2018 uvm_page_lookup_freelist(struct vm_page *pg)
   2019 {
   2020 	int lcv;
   2021 
   2022 	lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
   2023 	KASSERT(lcv != -1);
   2024 	return (VM_PHYSMEM_PTR(lcv)->free_list);
   2025 }
   2026 
   2027 /*
    2028  * uvm_page_locked_p: return true if the object associated with the page
    2029  * is locked.  this is a weak check for runtime assertions only.
   2030  */
   2031 
   2032 bool
   2033 uvm_page_locked_p(struct vm_page *pg)
   2034 {
   2035 
   2036 	if (pg->uobject != NULL) {
   2037 		return mutex_owned(pg->uobject->vmobjlock);
   2038 	}
   2039 	if (pg->uanon != NULL) {
   2040 		return mutex_owned(pg->uanon->an_lock);
   2041 	}
   2042 	return true;
   2043 }
   2044 
   2045 #if defined(DDB) || defined(DEBUGPRINT)
   2046 
   2047 /*
   2048  * uvm_page_printit: actually print the page
   2049  */
   2050 
   2051 static const char page_flagbits[] = UVM_PGFLAGBITS;
   2052 static const char page_pqflagbits[] = UVM_PQFLAGBITS;
   2053 
   2054 void
   2055 uvm_page_printit(struct vm_page *pg, bool full,
   2056     void (*pr)(const char *, ...))
   2057 {
   2058 	struct vm_page *tpg;
   2059 	struct uvm_object *uobj;
   2060 	struct pgflist *pgl;
   2061 	char pgbuf[128];
   2062 	char pqbuf[128];
   2063 
   2064 	(*pr)("PAGE %p:\n", pg);
   2065 	snprintb(pgbuf, sizeof(pgbuf), page_flagbits, pg->flags);
   2066 	snprintb(pqbuf, sizeof(pqbuf), page_pqflagbits, pg->pqflags);
   2067 	(*pr)("  flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n",
   2068 	    pgbuf, pqbuf, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg));
   2069 	(*pr)("  uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
   2070 	    pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
   2071 #if defined(UVM_PAGE_TRKOWN)
   2072 	if (pg->flags & PG_BUSY)
   2073 		(*pr)("  owning process = %d, tag=%s\n",
   2074 		    pg->owner, pg->owner_tag);
   2075 	else
   2076 		(*pr)("  page not busy, no owner\n");
   2077 #else
   2078 	(*pr)("  [page ownership tracking disabled]\n");
   2079 #endif
   2080 
   2081 	if (!full)
   2082 		return;
   2083 
   2084 	/* cross-verify object/anon */
   2085 	if ((pg->pqflags & PQ_FREE) == 0) {
   2086 		if (pg->pqflags & PQ_ANON) {
   2087 			if (pg->uanon == NULL || pg->uanon->an_page != pg)
   2088 			    (*pr)("  >>> ANON DOES NOT POINT HERE <<< (%p)\n",
   2089 				(pg->uanon) ? pg->uanon->an_page : NULL);
   2090 			else
   2091 				(*pr)("  anon backpointer is OK\n");
   2092 		} else {
   2093 			uobj = pg->uobject;
   2094 			if (uobj) {
   2095 				(*pr)("  checking object list\n");
   2096 				TAILQ_FOREACH(tpg, &uobj->memq, listq.queue) {
   2097 					if (tpg == pg) {
   2098 						break;
   2099 					}
   2100 				}
   2101 				if (tpg)
   2102 					(*pr)("  page found on object list\n");
   2103 				else
   2104 			(*pr)("  >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
   2105 			}
   2106 		}
   2107 	}
   2108 
   2109 	/* cross-verify page queue */
   2110 	if (pg->pqflags & PQ_FREE) {
   2111 		int fl = uvm_page_lookup_freelist(pg);
   2112 		int color = VM_PGCOLOR_BUCKET(pg);
   2113 		pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
   2114 		    ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
   2115 	} else {
   2116 		pgl = NULL;
   2117 	}
   2118 
   2119 	if (pgl) {
   2120 		(*pr)("  checking pageq list\n");
   2121 		LIST_FOREACH(tpg, pgl, pageq.list) {
   2122 			if (tpg == pg) {
   2123 				break;
   2124 			}
   2125 		}
   2126 		if (tpg)
   2127 			(*pr)("  page found on pageq list\n");
   2128 		else
   2129 			(*pr)("  >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
   2130 	}
   2131 }
   2132 
   2133 /*
    2134  * uvm_page_printall: print a summary of all managed pages
   2135  */
   2136 
   2137 void
   2138 uvm_page_printall(void (*pr)(const char *, ...))
   2139 {
   2140 	unsigned i;
   2141 	struct vm_page *pg;
   2142 
   2143 	(*pr)("%18s %4s %4s %18s %18s"
   2144 #ifdef UVM_PAGE_TRKOWN
   2145 	    " OWNER"
   2146 #endif
   2147 	    "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
   2148 	for (i = 0; i < vm_nphysmem; i++) {
   2149 		for (pg = VM_PHYSMEM_PTR(i)->pgs; pg < VM_PHYSMEM_PTR(i)->lastpg; pg++) {
   2150 			(*pr)("%18p %04x %04x %18p %18p",
   2151 			    pg, pg->flags, pg->pqflags, pg->uobject,
   2152 			    pg->uanon);
   2153 #ifdef UVM_PAGE_TRKOWN
   2154 			if (pg->flags & PG_BUSY)
   2155 				(*pr)(" %d [%s]", pg->owner, pg->owner_tag);
   2156 #endif
   2157 			(*pr)("\n");
   2158 		}
   2159 	}
   2160 }
   2161 
   2162 #endif /* DDB || DEBUGPRINT */
   2163