      1 /*	$NetBSD: uvm_page.c,v 1.214 2019/12/27 13:19:24 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2019 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Copyright (c) 1997 Charles D. Cranor and Washington University.
     34  * Copyright (c) 1991, 1993, The Regents of the University of California.
     35  *
     36  * All rights reserved.
     37  *
     38  * This code is derived from software contributed to Berkeley by
     39  * The Mach Operating System project at Carnegie-Mellon University.
     40  *
     41  * Redistribution and use in source and binary forms, with or without
     42  * modification, are permitted provided that the following conditions
     43  * are met:
     44  * 1. Redistributions of source code must retain the above copyright
     45  *    notice, this list of conditions and the following disclaimer.
     46  * 2. Redistributions in binary form must reproduce the above copyright
     47  *    notice, this list of conditions and the following disclaimer in the
     48  *    documentation and/or other materials provided with the distribution.
     49  * 3. Neither the name of the University nor the names of its contributors
     50  *    may be used to endorse or promote products derived from this software
     51  *    without specific prior written permission.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     63  * SUCH DAMAGE.
     64  *
     65  *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
     66  * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
     67  *
     68  *
     69  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
     70  * All rights reserved.
     71  *
     72  * Permission to use, copy, modify and distribute this software and
     73  * its documentation is hereby granted, provided that both the copyright
     74  * notice and this permission notice appear in all copies of the
     75  * software, derivative works or modified versions, and any portions
     76  * thereof, and that both notices appear in supporting documentation.
     77  *
     78  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     79  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     80  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     81  *
     82  * Carnegie Mellon requests users of this software to return to
     83  *
      84  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     85  *  School of Computer Science
     86  *  Carnegie Mellon University
     87  *  Pittsburgh PA 15213-3890
     88  *
     89  * any improvements or extensions that they make and grant Carnegie the
     90  * rights to redistribute these changes.
     91  */
     92 
     93 /*
     94  * uvm_page.c: page ops.
     95  */
     96 
     97 #include <sys/cdefs.h>
     98 __KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.214 2019/12/27 13:19:24 ad Exp $");
     99 
    100 #include "opt_ddb.h"
    101 #include "opt_uvm.h"
    102 #include "opt_uvmhist.h"
    103 #include "opt_readahead.h"
    104 
    105 #include <sys/param.h>
    106 #include <sys/systm.h>
    107 #include <sys/sched.h>
    108 #include <sys/kernel.h>
    109 #include <sys/vnode.h>
    110 #include <sys/proc.h>
    111 #include <sys/radixtree.h>
    112 #include <sys/atomic.h>
    113 #include <sys/cpu.h>
    114 #include <sys/extent.h>
    115 
    116 #include <uvm/uvm.h>
    117 #include <uvm/uvm_ddb.h>
    118 #include <uvm/uvm_pdpolicy.h>
    119 #include <uvm/uvm_pgflcache.h>
    120 
    121 /*
    122  * Some supported CPUs in a given architecture don't support all
    123  * of the things necessary to do idle page zero'ing efficiently.
    124  * We therefore provide a way to enable it from machdep code here.
    125  */
    126 bool vm_page_zero_enable = false;
    127 
    128 /*
    129  * number of pages per-CPU to reserve for the kernel.
    130  */
    131 #ifndef	UVM_RESERVED_PAGES_PER_CPU
    132 #define	UVM_RESERVED_PAGES_PER_CPU	5
    133 #endif
    134 int vm_page_reserve_kernel = UVM_RESERVED_PAGES_PER_CPU;
    135 
    136 /*
    137  * physical memory size;
    138  */
    139 psize_t physmem;
    140 
    141 /*
    142  * local variables
    143  */
    144 
    145 /*
    146  * these variables record the values returned by vm_page_bootstrap,
    147  * for debugging purposes.  The implementation of uvm_pageboot_alloc
    148  * and pmap_startup here also uses them internally.
    149  */
    150 
    151 static vaddr_t      virtual_space_start;
    152 static vaddr_t      virtual_space_end;
    153 
    154 /*
    155  * we allocate an initial number of page colors in uvm_page_init(),
    156  * and remember them.  We may re-color pages as cache sizes are
    157  * discovered during the autoconfiguration phase.  But we can never
    158  * free the initial set of buckets, since they are allocated using
    159  * uvm_pageboot_alloc().
    160  */
    161 
    162 static size_t recolored_pages_memsize /* = 0 */;
    163 static char *recolored_pages_mem;
    164 
    165 /*
    166  * freelist locks - one per bucket.
    167  */
    168 
    169 union uvm_freelist_lock	uvm_freelist_locks[PGFL_MAX_BUCKETS]
    170     __cacheline_aligned;
    171 
    172 /*
    173  * basic NUMA information.
    174  */
    175 
    176 static struct uvm_page_numa_region {
    177 	struct uvm_page_numa_region	*next;
    178 	paddr_t				start;
    179 	paddr_t				size;
    180 	u_int				numa_id;
    181 } *uvm_page_numa_region;
    182 
    183 #ifdef DEBUG
    184 vaddr_t uvm_zerocheckkva;
    185 #endif /* DEBUG */
    186 
    187 /*
    188  * These functions are reserved for uvm(9) internal use and are not
    189  * exported in the header file uvm_physseg.h
    190  *
     191  * Thus they are declared again here.
    192  */
    193 void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
    194 void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);
    195 
    196 /* returns a pgs array */
    197 struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);
    198 
    199 /*
    200  * inline functions
    201  */
    202 
    203 /*
    204  * uvm_pageinsert: insert a page in the object.
    205  *
    206  * => caller must lock object
     207  * => caller should have already set pg's object and offset pointers
    208  *    and bumped the version counter
    209  */
    210 
    211 static inline void
    212 uvm_pageinsert_object(struct uvm_object *uobj, struct vm_page *pg)
    213 {
    214 
    215 	KASSERT(uobj == pg->uobject);
    216 	KASSERT(mutex_owned(uobj->vmobjlock));
    217 	KASSERT((pg->flags & PG_TABLED) == 0);
    218 
    219 	if (UVM_OBJ_IS_VNODE(uobj)) {
    220 		if (uobj->uo_npages == 0) {
    221 			struct vnode *vp = (struct vnode *)uobj;
    222 
    223 			vholdl(vp);
    224 		}
    225 		if (UVM_OBJ_IS_VTEXT(uobj)) {
    226 			cpu_count(CPU_COUNT_EXECPAGES, 1);
    227 		} else {
    228 			cpu_count(CPU_COUNT_FILEPAGES, 1);
    229 		}
    230 	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
    231 		cpu_count(CPU_COUNT_ANONPAGES, 1);
    232 	}
    233 	pg->flags |= PG_TABLED;
    234 	uobj->uo_npages++;
    235 }
    236 
    237 static inline int
    238 uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
    239 {
    240 	const uint64_t idx = pg->offset >> PAGE_SHIFT;
    241 	int error;
    242 
    243 	error = radix_tree_insert_node(&uobj->uo_pages, idx, pg);
    244 	if (error != 0) {
    245 		return error;
    246 	}
    247 	return 0;
    248 }
    249 
    250 /*
    251  * uvm_page_remove: remove page from object.
    252  *
    253  * => caller must lock object
    254  */
    255 
    256 static inline void
    257 uvm_pageremove_object(struct uvm_object *uobj, struct vm_page *pg)
    258 {
    259 
    260 	KASSERT(uobj == pg->uobject);
    261 	KASSERT(mutex_owned(uobj->vmobjlock));
    262 	KASSERT(pg->flags & PG_TABLED);
    263 
    264 	if (UVM_OBJ_IS_VNODE(uobj)) {
    265 		if (uobj->uo_npages == 1) {
    266 			struct vnode *vp = (struct vnode *)uobj;
    267 
    268 			holdrelel(vp);
    269 		}
    270 		if (UVM_OBJ_IS_VTEXT(uobj)) {
    271 			cpu_count(CPU_COUNT_EXECPAGES, -1);
    272 		} else {
    273 			cpu_count(CPU_COUNT_FILEPAGES, -1);
    274 		}
    275 	} else if (UVM_OBJ_IS_AOBJ(uobj)) {
    276 		cpu_count(CPU_COUNT_ANONPAGES, -1);
    277 	}
    278 
    279 	/* object should be locked */
    280 	uobj->uo_npages--;
    281 	pg->flags &= ~PG_TABLED;
    282 	pg->uobject = NULL;
    283 }
    284 
    285 static inline void
    286 uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg)
    287 {
    288 	struct vm_page *opg __unused;
    289 
    290 	opg = radix_tree_remove_node(&uobj->uo_pages, pg->offset >> PAGE_SHIFT);
    291 	KASSERT(pg == opg);
    292 }
    293 
    294 static void
    295 uvm_page_init_bucket(struct pgfreelist *pgfl, struct pgflbucket *pgb, int num)
    296 {
    297 	int i;
    298 
    299 	pgb->pgb_nfree = 0;
    300 	for (i = 0; i < uvmexp.ncolors; i++) {
    301 		LIST_INIT(&pgb->pgb_colors[i]);
    302 	}
    303 	pgfl->pgfl_buckets[num] = pgb;
    304 }
    305 
    306 /*
    307  * uvm_page_init: init the page system.   called from uvm_init().
    308  *
    309  * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
    310  */
    311 
    312 void
    313 uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
    314 {
    315 	static struct uvm_cpu boot_cpu __cacheline_aligned;
    316 	psize_t freepages, pagecount, bucketsize, n;
    317 	struct pgflbucket *pgb;
    318 	struct vm_page *pagearray;
    319 	char *bucketarray;
    320 	uvm_physseg_t bank;
    321 	int fl, b;
    322 
    323 	KASSERT(ncpu <= 1);
    324 
    325 	/*
    326 	 * init the page queues and free page queue locks, except the
    327 	 * free list; we allocate that later (with the initial vm_page
    328 	 * structures).
    329 	 */
    330 
    331 	curcpu()->ci_data.cpu_uvm = &boot_cpu;
    332 	uvmpdpol_init();
    333 	for (b = 0; b < __arraycount(uvm_freelist_locks); b++) {
    334 		mutex_init(&uvm_freelist_locks[b].lock, MUTEX_DEFAULT, IPL_VM);
    335 	}
    336 
    337 	/*
    338 	 * allocate vm_page structures.
    339 	 */
    340 
    341 	/*
    342 	 * sanity check:
    343 	 * before calling this function the MD code is expected to register
    344 	 * some free RAM with the uvm_page_physload() function.   our job
    345 	 * now is to allocate vm_page structures for this memory.
    346 	 */
    347 
    348 	if (uvm_physseg_get_last() == UVM_PHYSSEG_TYPE_INVALID)
    349 		panic("uvm_page_bootstrap: no memory pre-allocated");
    350 
    351 	/*
    352 	 * first calculate the number of free pages...
    353 	 *
    354 	 * note that we use start/end rather than avail_start/avail_end.
    355 	 * this allows us to allocate extra vm_page structures in case we
    356 	 * want to return some memory to the pool after booting.
    357 	 */
    358 
    359 	freepages = 0;
    360 
    361 	for (bank = uvm_physseg_get_first();
    362 	     uvm_physseg_valid_p(bank) ;
    363 	     bank = uvm_physseg_get_next(bank)) {
    364 		freepages += (uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank));
    365 	}
    366 
    367 	/*
    368 	 * Let MD code initialize the number of colors, or default
    369 	 * to 1 color if MD code doesn't care.
    370 	 */
    371 	if (uvmexp.ncolors == 0)
    372 		uvmexp.ncolors = 1;
    373 	uvmexp.colormask = uvmexp.ncolors - 1;
    374 	KASSERT((uvmexp.colormask & uvmexp.ncolors) == 0);
    375 
    376 	/* We always start with only 1 bucket. */
    377 	uvm.bucketcount = 1;
    378 
    379 	/*
    380 	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
    381 	 * use.   for each page of memory we use we need a vm_page structure.
    382 	 * thus, the total number of pages we can use is the total size of
    383 	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
    384 	 * structure.   we add one to freepages as a fudge factor to avoid
    385 	 * truncation errors (since we can only allocate in terms of whole
    386 	 * pages).
    387 	 */
    388 	pagecount = ((freepages + 1) << PAGE_SHIFT) /
    389 	    (PAGE_SIZE + sizeof(struct vm_page));
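	/*
	 * Illustrative arithmetic (added comment, not part of the original
	 * source): assuming PAGE_SIZE is 4096 and sizeof(struct vm_page) is
	 * 128 bytes, each managed page costs 4096 + 128 bytes of raw memory,
	 * so pagecount works out to roughly freepages * 4096 / 4224, i.e.
	 * about 97% of the raw page count; the remainder is consumed by the
	 * vm_page array allocated below.
	 */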
    390 	bucketsize = offsetof(struct pgflbucket, pgb_colors[uvmexp.ncolors]);
    391 	bucketsize = roundup2(bucketsize, coherency_unit);
    392 	bucketarray = (void *)uvm_pageboot_alloc(
    393 	    bucketsize * VM_NFREELIST +
    394 	    pagecount * sizeof(struct vm_page));
    395 	pagearray = (struct vm_page *)
    396 	    (bucketarray + bucketsize * VM_NFREELIST);
    397 
    398 	for (fl = 0; fl < VM_NFREELIST; fl++) {
    399 		pgb = (struct pgflbucket *)(bucketarray + bucketsize * fl);
    400 		uvm_page_init_bucket(&uvm.page_free[fl], pgb, 0);
    401 	}
    402 	memset(pagearray, 0, pagecount * sizeof(struct vm_page));
    403 
    404 	/*
    405 	 * init the freelist cache in the disabled state.
    406 	 */
    407 	uvm_pgflcache_init();
    408 
    409 	/*
    410 	 * init the vm_page structures and put them in the correct place.
    411 	 */
    412 	/* First init the extent */
    413 
    414 	for (bank = uvm_physseg_get_first(),
    415 		 uvm_physseg_seg_chomp_slab(bank, pagearray, pagecount);
    416 	     uvm_physseg_valid_p(bank);
    417 	     bank = uvm_physseg_get_next(bank)) {
    418 
    419 		n = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
    420 		uvm_physseg_seg_alloc_from_slab(bank, n);
    421 		uvm_physseg_init_seg(bank, pagearray);
    422 
    423 		/* set up page array pointers */
    424 		pagearray += n;
    425 		pagecount -= n;
    426 	}
    427 
    428 	/*
    429 	 * pass up the values of virtual_space_start and
    430 	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
    431 	 * layers of the VM.
    432 	 */
    433 
    434 	*kvm_startp = round_page(virtual_space_start);
    435 	*kvm_endp = trunc_page(virtual_space_end);
    436 #ifdef DEBUG
    437 	/*
    438 	 * steal kva for uvm_pagezerocheck().
    439 	 */
    440 	uvm_zerocheckkva = *kvm_startp;
    441 	*kvm_startp += PAGE_SIZE;
    442 #endif /* DEBUG */
    443 
    444 	/*
    445 	 * init various thresholds.
    446 	 */
    447 
    448 	uvmexp.reserve_pagedaemon = 1;
    449 	uvmexp.reserve_kernel = vm_page_reserve_kernel;
    450 
    451 	/*
    452 	 * done!
    453 	 */
    454 
    455 	uvm.page_init_done = true;
    456 }
    457 
    458 /*
    459  * uvm_pgfl_lock: lock all freelist buckets
    460  */
    461 
    462 void
    463 uvm_pgfl_lock(void)
    464 {
    465 	int i;
    466 
    467 	for (i = 0; i < __arraycount(uvm_freelist_locks); i++) {
    468 		mutex_spin_enter(&uvm_freelist_locks[i].lock);
    469 	}
    470 }
    471 
    472 /*
    473  * uvm_pgfl_unlock: unlock all freelist buckets
    474  */
    475 
    476 void
    477 uvm_pgfl_unlock(void)
    478 {
    479 	int i;
    480 
    481 	for (i = 0; i < __arraycount(uvm_freelist_locks); i++) {
    482 		mutex_spin_exit(&uvm_freelist_locks[i].lock);
    483 	}
    484 }
    485 
    486 /*
    487  * uvm_setpagesize: set the page size
    488  *
    489  * => sets page_shift and page_mask from uvmexp.pagesize.
    490  */
    491 
    492 void
    493 uvm_setpagesize(void)
    494 {
    495 
    496 	/*
    497 	 * If uvmexp.pagesize is 0 at this point, we expect PAGE_SIZE
    498 	 * to be a constant (indicated by being a non-zero value).
    499 	 */
    500 	if (uvmexp.pagesize == 0) {
    501 		if (PAGE_SIZE == 0)
    502 			panic("uvm_setpagesize: uvmexp.pagesize not set");
    503 		uvmexp.pagesize = PAGE_SIZE;
    504 	}
    505 	uvmexp.pagemask = uvmexp.pagesize - 1;
    506 	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
    507 		panic("uvm_setpagesize: page size %u (%#x) not a power of two",
    508 		    uvmexp.pagesize, uvmexp.pagesize);
    509 	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
    510 		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
    511 			break;
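	/*
	 * Illustrative example (added comment, not in the original): with a
	 * page size of 4096 the assignment above gives pagemask == 0xfff,
	 * and the loop exits with pageshift == 12 because (1 << 12) == 4096.
	 */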
    512 }
    513 
    514 /*
    515  * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
    516  */
    517 
    518 vaddr_t
    519 uvm_pageboot_alloc(vsize_t size)
    520 {
    521 	static bool initialized = false;
    522 	vaddr_t addr;
    523 #if !defined(PMAP_STEAL_MEMORY)
    524 	vaddr_t vaddr;
    525 	paddr_t paddr;
    526 #endif
    527 
    528 	/*
    529 	 * on first call to this function, initialize ourselves.
    530 	 */
    531 	if (initialized == false) {
    532 		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
    533 
    534 		/* round it the way we like it */
    535 		virtual_space_start = round_page(virtual_space_start);
    536 		virtual_space_end = trunc_page(virtual_space_end);
    537 
    538 		initialized = true;
    539 	}
    540 
    541 	/* round to page size */
    542 	size = round_page(size);
    543 	uvmexp.bootpages += atop(size);
    544 
    545 #if defined(PMAP_STEAL_MEMORY)
    546 
    547 	/*
    548 	 * defer bootstrap allocation to MD code (it may want to allocate
    549 	 * from a direct-mapped segment).  pmap_steal_memory should adjust
    550 	 * virtual_space_start/virtual_space_end if necessary.
    551 	 */
    552 
    553 	addr = pmap_steal_memory(size, &virtual_space_start,
    554 	    &virtual_space_end);
    555 
    556 	return(addr);
    557 
    558 #else /* !PMAP_STEAL_MEMORY */
    559 
    560 	/*
    561 	 * allocate virtual memory for this request
    562 	 */
    563 	if (virtual_space_start == virtual_space_end ||
    564 	    (virtual_space_end - virtual_space_start) < size)
    565 		panic("uvm_pageboot_alloc: out of virtual space");
    566 
    567 	addr = virtual_space_start;
    568 
    569 #ifdef PMAP_GROWKERNEL
    570 	/*
    571 	 * If the kernel pmap can't map the requested space,
    572 	 * then allocate more resources for it.
    573 	 */
    574 	if (uvm_maxkaddr < (addr + size)) {
    575 		uvm_maxkaddr = pmap_growkernel(addr + size);
    576 		if (uvm_maxkaddr < (addr + size))
    577 			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
    578 	}
    579 #endif
    580 
    581 	virtual_space_start += size;
    582 
    583 	/*
     584 	 * allocate and map in physical pages to back new virtual pages
    585 	 */
    586 
    587 	for (vaddr = round_page(addr) ; vaddr < addr + size ;
    588 	    vaddr += PAGE_SIZE) {
    589 
    590 		if (!uvm_page_physget(&paddr))
    591 			panic("uvm_pageboot_alloc: out of memory");
    592 
    593 		/*
    594 		 * Note this memory is no longer managed, so using
    595 		 * pmap_kenter is safe.
    596 		 */
    597 		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE, 0);
    598 	}
    599 	pmap_update(pmap_kernel());
    600 	return(addr);
    601 #endif	/* PMAP_STEAL_MEMORY */
    602 }
    603 
    604 #if !defined(PMAP_STEAL_MEMORY)
    605 /*
    606  * uvm_page_physget: "steal" one page from the vm_physmem structure.
    607  *
    608  * => attempt to allocate it off the end of a segment in which the "avail"
    609  *    values match the start/end values.   if we can't do that, then we
    610  *    will advance both values (making them equal, and removing some
    611  *    vm_page structures from the non-avail area).
    612  * => return false if out of memory.
    613  */
    614 
    615 /* subroutine: try to allocate from memory chunks on the specified freelist */
    616 static bool uvm_page_physget_freelist(paddr_t *, int);
    617 
    618 static bool
    619 uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
    620 {
    621 	uvm_physseg_t lcv;
    622 
    623 	/* pass 1: try allocating from a matching end */
    624 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
    625 	for (lcv = uvm_physseg_get_last(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_prev(lcv))
    626 #else
    627 	for (lcv = uvm_physseg_get_first(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_next(lcv))
    628 #endif
    629 	{
    630 		if (uvm.page_init_done == true)
    631 			panic("uvm_page_physget: called _after_ bootstrap");
    632 
    633 		/* Try to match at front or back on unused segment */
    634 		if (uvm_page_physunload(lcv, freelist, paddrp))
    635 			return true;
    636 	}
    637 
     638 	/* pass 2: forget about matching ends, just allocate something */
    639 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
    640 	for (lcv = uvm_physseg_get_last(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_prev(lcv))
    641 #else
    642 	for (lcv = uvm_physseg_get_first(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_next(lcv))
    643 #endif
    644 	{
    645 		/* Try the front regardless. */
    646 		if (uvm_page_physunload_force(lcv, freelist, paddrp))
    647 			return true;
    648 	}
    649 	return false;
    650 }
    651 
    652 bool
    653 uvm_page_physget(paddr_t *paddrp)
    654 {
    655 	int i;
    656 
    657 	/* try in the order of freelist preference */
    658 	for (i = 0; i < VM_NFREELIST; i++)
    659 		if (uvm_page_physget_freelist(paddrp, i) == true)
    660 			return (true);
    661 	return (false);
    662 }
    663 #endif /* PMAP_STEAL_MEMORY */
    664 
    665 /*
    666  * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
    667  * back from an I/O mapping (ugh!).   used in some MD code as well.
    668  */
    669 struct vm_page *
    670 uvm_phys_to_vm_page(paddr_t pa)
    671 {
    672 	paddr_t pf = atop(pa);
    673 	paddr_t	off;
    674 	uvm_physseg_t	upm;
    675 
    676 	upm = uvm_physseg_find(pf, &off);
    677 	if (upm != UVM_PHYSSEG_TYPE_INVALID)
    678 		return uvm_physseg_get_pg(upm, off);
    679 	return(NULL);
    680 }
    681 
    682 paddr_t
    683 uvm_vm_page_to_phys(const struct vm_page *pg)
    684 {
    685 
    686 	return pg->phys_addr & ~(PAGE_SIZE - 1);
    687 }
    688 
    689 /*
    690  * uvm_page_numa_load: load NUMA range description.
    691  */
    692 void
    693 uvm_page_numa_load(paddr_t start, paddr_t size, u_int numa_id)
    694 {
    695 	struct uvm_page_numa_region *d;
    696 
    697 	KASSERT(numa_id < PGFL_MAX_BUCKETS);
    698 
    699 	d = kmem_alloc(sizeof(*d), KM_SLEEP);
    700 	d->start = start;
    701 	d->size = size;
    702 	d->numa_id = numa_id;
    703 	d->next = uvm_page_numa_region;
    704 	uvm_page_numa_region = d;
    705 }
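
/*
 * Hedged usage sketch (hypothetical, not part of this file): machine
 * dependent code that has parsed a firmware memory-affinity table might
 * register each physical range before uvm_page_rebucket() redistributes
 * the free pages, for example:
 *
 *	uvm_page_numa_load(0x000000000, 0x100000000, 0);  (node 0: first 4GB)
 *	uvm_page_numa_load(0x100000000, 0x100000000, 1);  (node 1: next 4GB)
 *
 * The addresses and node IDs above are invented for illustration only.
 */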
    706 
    707 /*
    708  * uvm_page_numa_lookup: lookup NUMA node for the given page.
    709  */
    710 static u_int
    711 uvm_page_numa_lookup(struct vm_page *pg)
    712 {
    713 	struct uvm_page_numa_region *d;
    714 	static bool warned;
    715 	paddr_t pa;
    716 
    717 	KASSERT(uvm.numa_alloc);
    718 	KASSERT(uvm_page_numa_region != NULL);
    719 
    720 	pa = VM_PAGE_TO_PHYS(pg);
    721 	for (d = uvm_page_numa_region; d != NULL; d = d->next) {
    722 		if (pa >= d->start && pa < d->start + d->size) {
    723 			return d->numa_id;
    724 		}
    725 	}
    726 
    727 	if (!warned) {
    728 		printf("uvm_page_numa_lookup: failed, first pg=%p pa=%p\n",
    729 		    pg, (void *)VM_PAGE_TO_PHYS(pg));
    730 		warned = true;
    731 	}
    732 
    733 	return 0;
    734 }
    735 
    736 /*
    737  * uvm_page_redim: adjust freelist dimensions if they have changed.
    738  */
    739 
    740 static void
    741 uvm_page_redim(int newncolors, int newnbuckets)
    742 {
    743 	struct pgfreelist npgfl;
    744 	struct pgflbucket *opgb, *npgb;
    745 	struct pgflist *ohead, *nhead;
    746 	struct vm_page *pg;
    747 	size_t bucketsize, bucketmemsize, oldbucketmemsize;
    748 	int fl, ob, oc, nb, nc, obuckets, ocolors;
    749 	char *bucketarray, *oldbucketmem, *bucketmem;
    750 
    751 	KASSERT(((newncolors - 1) & newncolors) == 0);
    752 
    753 	/* Anything to do? */
    754 	if (newncolors <= uvmexp.ncolors &&
    755 	    newnbuckets == uvm.bucketcount) {
    756 		return;
    757 	}
    758 	if (uvm.page_init_done == false) {
    759 		uvmexp.ncolors = newncolors;
    760 		return;
    761 	}
    762 
    763 	bucketsize = offsetof(struct pgflbucket, pgb_colors[newncolors]);
    764 	bucketsize = roundup2(bucketsize, coherency_unit);
    765 	bucketmemsize = bucketsize * newnbuckets * VM_NFREELIST +
    766 	    coherency_unit - 1;
    767 	bucketmem = kmem_zalloc(bucketmemsize, KM_SLEEP);
    768 	bucketarray = (char *)roundup2((uintptr_t)bucketmem, coherency_unit);
    769 
    770 	ocolors = uvmexp.ncolors;
    771 	obuckets = uvm.bucketcount;
    772 
     773 	/* Freelist cache mustn't be enabled. */
    774 	uvm_pgflcache_pause();
    775 
    776 	/* Make sure we should still do this. */
    777 	uvm_pgfl_lock();
    778 	if (newncolors <= uvmexp.ncolors &&
    779 	    newnbuckets == uvm.bucketcount) {
    780 		uvm_pgfl_unlock();
    781 		kmem_free(bucketmem, bucketmemsize);
    782 		return;
    783 	}
    784 
    785 	uvmexp.ncolors = newncolors;
    786 	uvmexp.colormask = uvmexp.ncolors - 1;
    787 	uvm.bucketcount = newnbuckets;
    788 
    789 	for (fl = 0; fl < VM_NFREELIST; fl++) {
    790 		/* Init new buckets in new freelist. */
    791 		memset(&npgfl, 0, sizeof(npgfl));
    792 		for (nb = 0; nb < newnbuckets; nb++) {
    793 			npgb = (struct pgflbucket *)bucketarray;
    794 			uvm_page_init_bucket(&npgfl, npgb, nb);
    795 			bucketarray += bucketsize;
    796 		}
    797 		/* Now transfer pages from the old freelist. */
    798 		for (nb = ob = 0; ob < obuckets; ob++) {
    799 			opgb = uvm.page_free[fl].pgfl_buckets[ob];
    800 			for (oc = 0; oc < ocolors; oc++) {
    801 				ohead = &opgb->pgb_colors[oc];
    802 				while ((pg = LIST_FIRST(ohead)) != NULL) {
    803 					LIST_REMOVE(pg, pageq.list);
    804 					/*
    805 					 * Here we decide on the NEW color &
    806 					 * bucket for the page.  For NUMA
    807 					 * we'll use the info that the
    808 					 * hardware gave us.  Otherwise we
    809 					 * just do a round-robin among the
    810 					 * buckets.
    811 					 */
    812 					KASSERT(
    813 					    uvm_page_get_bucket(pg) == ob);
    814 					KASSERT(fl ==
    815 					    uvm_page_get_freelist(pg));
    816 					if (uvm.numa_alloc) {
    817 						nb = uvm_page_numa_lookup(pg);
    818 					} else if (nb + 1 < newnbuckets) {
    819 						nb = nb + 1;
    820 					} else {
    821 						nb = 0;
    822 					}
    823 					uvm_page_set_bucket(pg, nb);
    824 					npgb = npgfl.pgfl_buckets[nb];
    825 					npgb->pgb_nfree++;
    826 					nc = VM_PGCOLOR(pg);
    827 					nhead = &npgb->pgb_colors[nc];
    828 					LIST_INSERT_HEAD(nhead, pg, pageq.list);
    829 				}
    830 			}
    831 		}
    832 		/* Install the new freelist. */
    833 		memcpy(&uvm.page_free[fl], &npgfl, sizeof(npgfl));
    834 	}
    835 
    836 	/* Unlock and free the old memory. */
    837 	oldbucketmemsize = recolored_pages_memsize;
    838 	oldbucketmem = recolored_pages_mem;
    839 	recolored_pages_memsize = bucketmemsize;
    840 	recolored_pages_mem = bucketmem;
    841 	uvm_pgfl_unlock();
    842 
    843 	if (oldbucketmemsize) {
    844 		kmem_free(oldbucketmem, oldbucketmemsize);
    845 	}
    846 
    847 	uvm_pgflcache_resume();
    848 
    849 	/*
    850 	 * this calls uvm_km_alloc() which may want to hold
    851 	 * uvm_freelist_lock.
    852 	 */
    853 	uvm_pager_realloc_emerg();
    854 }
    855 
    856 /*
    857  * uvm_page_recolor: Recolor the pages if the new color count is
    858  * larger than the old one.
    859  */
    860 
    861 void
    862 uvm_page_recolor(int newncolors)
    863 {
    864 
    865 	uvm_page_redim(newncolors, uvm.bucketcount);
    866 }
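
/*
 * Hedged usage sketch (hypothetical, not part of this file): machine
 * dependent code that has probed the cache geometry could request one
 * color per page-sized slot in a way of the largest physically indexed
 * cache, for example:
 *
 *	u_int way_size = l2_cache_size / l2_cache_assoc;   (assumed variables)
 *	uvm_page_recolor(way_size >> PAGE_SHIFT);
 *
 * The variable names above are invented; the requested count must be a
 * power of two (see the KASSERT in uvm_page_redim()).
 */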
    867 
    868 /*
    869  * uvm_page_rebucket: Determine a bucket structure and redim the free
    870  * lists to match.
    871  */
    872 
    873 void
    874 uvm_page_rebucket(void)
    875 {
    876 	u_int min_numa, max_numa, npackage, shift;
    877 	struct cpu_info *ci, *ci2, *ci3;
    878 	CPU_INFO_ITERATOR cii;
    879 
    880 	/*
    881 	 * If we have more than one NUMA node, and the maximum NUMA node ID
    882 	 * is less than PGFL_MAX_BUCKETS, then we'll use NUMA distribution
    883 	 * for free pages.  uvm_pagefree() will not reassign pages to a
    884 	 * different bucket on free.
    885 	 */
    886 	min_numa = (u_int)-1;
    887 	max_numa = 0;
    888 	for (CPU_INFO_FOREACH(cii, ci)) {
    889 		if (ci->ci_numa_id < min_numa) {
    890 			min_numa = ci->ci_numa_id;
    891 		}
    892 		if (ci->ci_numa_id > max_numa) {
    893 			max_numa = ci->ci_numa_id;
    894 		}
    895 	}
    896 	if (min_numa != max_numa && max_numa < PGFL_MAX_BUCKETS) {
    897 #ifdef NUMA
    898 		/*
    899 		 * We can do this, and it seems to work well, but until
    900 		 * further experiments are done we'll stick with the cache
    901 		 * locality strategy.
    902 		 */
    903 		aprint_debug("UVM: using NUMA allocation scheme\n");
    904 		for (CPU_INFO_FOREACH(cii, ci)) {
    905 			ci->ci_data.cpu_uvm->pgflbucket = ci->ci_numa_id;
    906 		}
    907 		uvm.numa_alloc = true;
    908 	 	uvm_page_redim(uvmexp.ncolors, max_numa + 1);
    909 	 	return;
    910 #endif
    911 	}
    912 
    913 	/*
    914 	 * Otherwise we'll go with a scheme to maximise L2/L3 cache locality
    915 	 * and minimise lock contention.  Count the total number of CPU
    916 	 * packages, and then try to distribute the buckets among CPU
    917 	 * packages evenly.  uvm_pagefree() will reassign pages to the
    918 	 * freeing CPU's preferred bucket on free.
    919 	 */
    920 	npackage = 0;
    921 	ci = curcpu();
    922 	ci2 = ci;
    923 	do {
    924 		npackage++;
    925 		ci2 = ci2->ci_sibling[CPUREL_PEER];
    926 	} while (ci2 != ci);
    927 
    928 	/*
    929 	 * Figure out how to arrange the packages & buckets, and the total
    930 	 * number of buckets we need.  XXX 2 may not be the best factor.
    931 	 */
    932 	for (shift = 0; npackage > PGFL_MAX_BUCKETS; shift++) {
    933 		npackage >>= 1;
    934 	}
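	/*
	 * Worked example (added comment; assumes PGFL_MAX_BUCKETS were 8):
	 * a machine with 16 CPU packages leaves this loop with npackage == 8
	 * and shift == 1, so two packages share each bucket and the
	 * assignment further below maps package N to bucket N >> 1.
	 */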
    935  	uvm_page_redim(uvmexp.ncolors, npackage);
    936 
    937  	/*
    938  	 * Now tell each CPU which bucket to use.  In the outer loop, scroll
    939  	 * through all CPU packages.
    940  	 */
    941  	npackage = 0;
    942 	ci = curcpu();
    943 	ci2 = ci;
    944 	do {
    945 		/*
    946 		 * In the inner loop, scroll through all CPUs in the package
    947 		 * and assign the same bucket ID.
    948 		 */
    949 		ci3 = ci2;
    950 		do {
    951 			ci3->ci_data.cpu_uvm->pgflbucket = npackage >> shift;
    952 			ci3 = ci3->ci_sibling[CPUREL_PACKAGE];
    953 		} while (ci3 != ci2);
    954 		npackage++;
    955 		ci2 = ci2->ci_sibling[CPUREL_PEER];
    956 	} while (ci2 != ci);
    957 
    958 	aprint_debug("UVM: using package allocation scheme, "
    959 	    "%d package(s) per bucket\n", 1 << shift);
    960 }
    961 
    962 /*
    963  * uvm_cpu_attach: initialize per-CPU data structures.
    964  */
    965 
    966 void
    967 uvm_cpu_attach(struct cpu_info *ci)
    968 {
    969 	struct uvm_cpu *ucpu;
    970 
    971 	/* Already done in uvm_page_init(). */
    972 	if (!CPU_IS_PRIMARY(ci)) {
    973 		/* Add more reserve pages for this CPU. */
    974 		uvmexp.reserve_kernel += vm_page_reserve_kernel;
    975 
    976 		/* Allocate per-CPU data structures. */
    977 		ucpu = kmem_zalloc(sizeof(struct uvm_cpu) + coherency_unit - 1,
    978 		    KM_SLEEP);
    979 		ucpu = (struct uvm_cpu *)roundup2((uintptr_t)ucpu,
    980 		    coherency_unit);
    981 		ci->ci_data.cpu_uvm = ucpu;
    982 	} else {
    983 		ucpu = ci->ci_data.cpu_uvm;
    984 	}
    985 
    986 	/*
    987 	 * Attach RNG source for this CPU's VM events
    988 	 */
    989         rnd_attach_source(&ucpu->rs, ci->ci_data.cpu_name, RND_TYPE_VM,
    990 	    RND_FLAG_COLLECT_TIME|RND_FLAG_COLLECT_VALUE|
    991 	    RND_FLAG_ESTIMATE_VALUE);
    992 }
    993 
    994 /*
    995  * uvm_free: fetch the total amount of free memory in pages.  This can have a
    996  * detrimental effect on performance due to false sharing; don't call unless
    997  * needed.
    998  */
    999 
   1000 int
   1001 uvm_free(void)
   1002 {
   1003 	struct pgfreelist *pgfl;
   1004 	int fl, b, fpages;
   1005 
   1006 	fpages = 0;
   1007 	for (fl = 0; fl < VM_NFREELIST; fl++) {
   1008 		pgfl = &uvm.page_free[fl];
   1009 		for (b = 0; b < uvm.bucketcount; b++) {
   1010 			fpages += pgfl->pgfl_buckets[b]->pgb_nfree;
   1011 		}
   1012 	}
   1013 	return fpages;
   1014 }
   1015 
   1016 /*
   1017  * uvm_pagealloc_pgb: helper routine that tries to allocate any color from a
   1018  * specific freelist and specific bucket only.
   1019  *
   1020  * => must be at IPL_VM or higher to protect per-CPU data structures.
   1021  */
   1022 
   1023 static struct vm_page *
   1024 uvm_pagealloc_pgb(struct uvm_cpu *ucpu, int f, int b, int *trycolorp, int flags)
   1025 {
   1026 	int c, trycolor, colormask;
   1027 	struct pgflbucket *pgb;
   1028 	struct vm_page *pg;
   1029 	kmutex_t *lock;
   1030 
   1031 	/*
   1032 	 * Skip the bucket if empty, no lock needed.  There could be many
   1033 	 * empty freelists/buckets.
   1034 	 */
   1035 	pgb = uvm.page_free[f].pgfl_buckets[b];
   1036 	if (pgb->pgb_nfree == 0) {
   1037 		return NULL;
   1038 	}
   1039 
   1040 	/* Skip bucket if low on memory. */
   1041 	lock = &uvm_freelist_locks[b].lock;
   1042 	mutex_spin_enter(lock);
   1043 	if (__predict_false(pgb->pgb_nfree <= uvmexp.reserve_kernel)) {
   1044 		if ((flags & UVM_PGA_USERESERVE) == 0 ||
   1045 		    (pgb->pgb_nfree <= uvmexp.reserve_pagedaemon &&
   1046 		     curlwp != uvm.pagedaemon_lwp)) {
   1047 			mutex_spin_exit(lock);
   1048 		     	return NULL;
   1049 		}
   1050 	}
   1051 
   1052 	/* Try all page colors as needed. */
   1053 	c = trycolor = *trycolorp;
   1054 	colormask = uvmexp.colormask;
   1055 	do {
   1056 		pg = LIST_FIRST(&pgb->pgb_colors[c]);
   1057 		if (__predict_true(pg != NULL)) {
   1058 			/*
   1059 			 * Got a free page!  PG_FREE must be cleared under
   1060 			 * lock because of uvm_pglistalloc().
   1061 			 */
   1062 			LIST_REMOVE(pg, pageq.list);
   1063 			KASSERT(pg->flags & PG_FREE);
   1064 			pg->flags &= PG_ZERO;
   1065 			pgb->pgb_nfree--;
   1066 
   1067 			/*
   1068 			 * While we have the bucket locked and our data
   1069 			 * structures fresh in L1 cache, we have an ideal
   1070 			 * opportunity to grab some pages for the freelist
   1071 			 * cache without causing extra contention.  Only do
   1072 			 * so if we found pages in this CPU's preferred
   1073 			 * bucket.
   1074 			 */
   1075 			if (__predict_true(b == ucpu->pgflbucket)) {
   1076 				uvm_pgflcache_fill(ucpu, f, b, c);
   1077 			}
   1078 			mutex_spin_exit(lock);
   1079 			KASSERT(uvm_page_get_bucket(pg) == b);
   1080 			CPU_COUNT(c == trycolor ?
   1081 			    CPU_COUNT_COLORHIT : CPU_COUNT_COLORMISS, 1);
   1082 			CPU_COUNT(CPU_COUNT_CPUMISS, 1);
   1083 			*trycolorp = c;
   1084 			return pg;
   1085 		}
   1086 		c = (c + 1) & colormask;
   1087 	} while (c != trycolor);
   1088 	mutex_spin_exit(lock);
   1089 
   1090 	return NULL;
   1091 }
   1092 
   1093 /*
   1094  * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat that allocates
   1095  * any color from any bucket, in a specific freelist.
   1096  *
   1097  * => must be at IPL_VM or higher to protect per-CPU data structures.
   1098  */
   1099 
   1100 static struct vm_page *
   1101 uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int f, int *trycolorp, int flags)
   1102 {
   1103 	int b, trybucket, bucketcount;
   1104 	struct vm_page *pg;
   1105 
   1106 	/* Try for the exact thing in the per-CPU cache. */
   1107 	if ((pg = uvm_pgflcache_alloc(ucpu, f, *trycolorp)) != NULL) {
   1108 		CPU_COUNT(CPU_COUNT_CPUHIT, 1);
   1109 		CPU_COUNT(CPU_COUNT_COLORHIT, 1);
   1110 		return pg;
   1111 	}
   1112 
   1113 	/* Walk through all buckets, trying our preferred bucket first. */
   1114 	trybucket = ucpu->pgflbucket;
   1115 	b = trybucket;
   1116 	bucketcount = uvm.bucketcount;
   1117 	do {
   1118 		pg = uvm_pagealloc_pgb(ucpu, f, b, trycolorp, flags);
   1119 		if (pg != NULL) {
   1120 			return pg;
   1121 		}
   1122 		b = (b + 1 == bucketcount ? 0 : b + 1);
   1123 	} while (b != trybucket);
   1124 
   1125 	return NULL;
   1126 }
   1127 
   1128 /*
   1129  * uvm_pagealloc_strat: allocate vm_page from a particular free list.
   1130  *
   1131  * => return null if no pages free
   1132  * => wake up pagedaemon if number of free pages drops below low water mark
   1133  * => if obj != NULL, obj must be locked (to put in obj's tree)
   1134  * => if anon != NULL, anon must be locked (to put in anon)
   1135  * => only one of obj or anon can be non-null
   1136  * => caller must activate/deactivate page if it is not wired.
   1137  * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
   1138  * => policy decision: it is more important to pull a page off of the
    1139  *	appropriate priority free list than it is to get a zero'd page
    1140  *	or a page of unknown contents.  This is because we live with the
   1141  *	consequences of a bad free list decision for the entire
   1142  *	lifetime of the page, e.g. if the page comes from memory that
   1143  *	is slower to access.
   1144  */
   1145 
   1146 struct vm_page *
   1147 uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
   1148     int flags, int strat, int free_list)
   1149 {
   1150 	int zeroit = 0, color;
   1151 	int lcv, error, s;
   1152 	struct uvm_cpu *ucpu;
   1153 	struct vm_page *pg;
   1154 	lwp_t *l;
   1155 
   1156 	KASSERT(obj == NULL || anon == NULL);
   1157 	KASSERT(anon == NULL || (flags & UVM_FLAG_COLORMATCH) || off == 0);
   1158 	KASSERT(off == trunc_page(off));
   1159 	KASSERT(obj == NULL || mutex_owned(obj->vmobjlock));
   1160 	KASSERT(anon == NULL || anon->an_lock == NULL ||
   1161 	    mutex_owned(anon->an_lock));
   1162 
   1163 	/*
   1164 	 * This implements a global round-robin page coloring
   1165 	 * algorithm.
   1166 	 */
   1167 
   1168 	s = splvm();
   1169 	ucpu = curcpu()->ci_data.cpu_uvm;
   1170 	if (flags & UVM_FLAG_COLORMATCH) {
   1171 		color = atop(off) & uvmexp.colormask;
   1172 	} else {
   1173 		color = ucpu->pgflcolor;
   1174 	}
   1175 
   1176 	/*
   1177 	 * fail if any of these conditions is true:
   1178 	 * [1]  there really are no free pages, or
   1179 	 * [2]  only kernel "reserved" pages remain and
    1180 	 *        reserved pages have not been requested, or
   1181 	 * [3]  only pagedaemon "reserved" pages remain and
   1182 	 *        the requestor isn't the pagedaemon.
   1183 	 * we make kernel reserve pages available if called by a
   1184 	 * kernel thread or a realtime thread.
   1185 	 */
   1186 	l = curlwp;
   1187 	if (__predict_true(l != NULL) && lwp_eprio(l) >= PRI_KTHREAD) {
   1188 		flags |= UVM_PGA_USERESERVE;
   1189 	}
   1190 
   1191 	/* If the allocator's running in NUMA mode, go with NUMA strategy. */
   1192 	if (uvm.numa_alloc && strat == UVM_PGA_STRAT_NORMAL) {
   1193 		strat = UVM_PGA_STRAT_NUMA;
   1194 	}
   1195 
   1196  again:
   1197 	switch (strat) {
   1198 	case UVM_PGA_STRAT_NORMAL:
   1199 		/* Check freelists: descending priority (ascending id) order. */
   1200 		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
   1201 			pg = uvm_pagealloc_pgfl(ucpu, lcv, &color, flags);
   1202 			if (pg != NULL) {
   1203 				goto gotit;
   1204 			}
   1205 		}
   1206 
   1207 		/* No pages free!  Have pagedaemon free some memory. */
   1208 		splx(s);
   1209 		uvm_kick_pdaemon();
   1210 		return NULL;
   1211 
   1212 	case UVM_PGA_STRAT_ONLY:
   1213 	case UVM_PGA_STRAT_FALLBACK:
   1214 		/* Attempt to allocate from the specified free list. */
   1215 		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
   1216 		pg = uvm_pagealloc_pgfl(ucpu, free_list, &color, flags);
   1217 		if (pg != NULL) {
   1218 			goto gotit;
   1219 		}
   1220 
   1221 		/* Fall back, if possible. */
   1222 		if (strat == UVM_PGA_STRAT_FALLBACK) {
   1223 			strat = UVM_PGA_STRAT_NORMAL;
   1224 			goto again;
   1225 		}
   1226 
   1227 		/* No pages free!  Have pagedaemon free some memory. */
   1228 		splx(s);
   1229 		uvm_kick_pdaemon();
   1230 		return NULL;
   1231 
   1232 	case UVM_PGA_STRAT_NUMA:
   1233 		/*
   1234 		 * NUMA strategy: allocating from the correct bucket is more
   1235 		 * important than observing freelist priority.  Look only to
   1236 		 * the current NUMA node; if that fails, we need to look to
   1237 		 * other NUMA nodes, so retry with the normal strategy.
   1238 		 */
   1239 		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
   1240 			pg = uvm_pgflcache_alloc(ucpu, lcv, color);
   1241 			if (pg != NULL) {
   1242 				CPU_COUNT(CPU_COUNT_CPUHIT, 1);
   1243 				CPU_COUNT(CPU_COUNT_COLORHIT, 1);
   1244 				goto gotit;
   1245 			}
   1246 			pg = uvm_pagealloc_pgb(ucpu, lcv,
   1247 			    ucpu->pgflbucket, &color, flags);
   1248 			if (pg != NULL) {
   1249 				goto gotit;
   1250 			}
   1251 		}
   1252 		strat = UVM_PGA_STRAT_NORMAL;
   1253 		goto again;
   1254 
   1255 	default:
   1256 		panic("uvm_pagealloc_strat: bad strat %d", strat);
   1257 		/* NOTREACHED */
   1258 	}
   1259 
   1260  gotit:
   1261 	/*
   1262 	 * We now know which color we actually allocated from; set
   1263 	 * the next color accordingly.
   1264 	 */
   1265 
   1266 	ucpu->pgflcolor = (color + 1) & uvmexp.colormask;
   1267 
   1268 	/*
   1269 	 * while still at IPL_VM, update allocation statistics and remember
   1270 	 * if we have to zero the page
   1271 	 */
   1272 
   1273 	if (flags & UVM_PGA_ZERO) {
   1274 		if (pg->flags & PG_ZERO) {
   1275 		    	CPU_COUNT(CPU_COUNT_PGA_ZEROHIT, 1);
   1276 			zeroit = 0;
   1277 		} else {
   1278 		    	CPU_COUNT(CPU_COUNT_PGA_ZEROMISS, 1);
   1279 			zeroit = 1;
   1280 		}
   1281 	}
   1282 	if (pg->flags & PG_ZERO) {
   1283 	    	CPU_COUNT(CPU_COUNT_ZEROPAGES, -1);
   1284 	}
   1285 	if (anon) {
   1286 		CPU_COUNT(CPU_COUNT_ANONPAGES, 1);
   1287 	}
   1288 	splx(s);
   1289 	KASSERT((pg->flags & ~(PG_ZERO|PG_FREE)) == 0);
   1290 
   1291 	/*
   1292 	 * assign the page to the object.  as the page was free, we know
   1293 	 * that pg->uobject and pg->uanon are NULL.  we only need to take
   1294 	 * the page's interlock if we are changing the values.
   1295 	 */
   1296 	if (anon != NULL || obj != NULL) {
   1297 		mutex_enter(&pg->interlock);
   1298 	}
   1299 	pg->offset = off;
   1300 	pg->uobject = obj;
   1301 	pg->uanon = anon;
   1302 	KASSERT(uvm_page_locked_p(pg));
   1303 	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
   1304 	if (anon) {
   1305 		anon->an_page = pg;
   1306 		pg->flags |= PG_ANON;
   1307 		mutex_exit(&pg->interlock);
   1308 	} else if (obj) {
   1309 		uvm_pageinsert_object(obj, pg);
   1310 		mutex_exit(&pg->interlock);
   1311 		error = uvm_pageinsert_tree(obj, pg);
   1312 		if (error != 0) {
   1313 			mutex_enter(&pg->interlock);
   1314 			uvm_pageremove_object(obj, pg);
   1315 			mutex_exit(&pg->interlock);
   1316 			uvm_pagefree(pg);
   1317 			return NULL;
   1318 		}
   1319 	}
   1320 
   1321 #if defined(UVM_PAGE_TRKOWN)
   1322 	pg->owner_tag = NULL;
   1323 #endif
   1324 	UVM_PAGE_OWN(pg, "new alloc");
   1325 
   1326 	if (flags & UVM_PGA_ZERO) {
   1327 		/*
   1328 		 * A zero'd page is not clean.  If we got a page not already
   1329 		 * zero'd, then we have to zero it ourselves.
   1330 		 */
   1331 		pg->flags &= ~PG_CLEAN;
   1332 		if (zeroit)
   1333 			pmap_zero_page(VM_PAGE_TO_PHYS(pg));
   1334 	}
   1335 
   1336 	return(pg);
   1337 }
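
/*
 * Hedged usage sketch (illustrative only, not part of this file): a typical
 * object-backed allocation retries via uvm_wait() when no pages are free,
 * and the caller clears PG_BUSY (e.g. with uvm_page_unbusy()) once the page
 * contents are valid:
 *
 *	struct vm_page *pg;
 *
 *	mutex_enter(uobj->vmobjlock);
 *	while ((pg = uvm_pagealloc_strat(uobj, off, NULL, UVM_PGA_ZERO,
 *	    UVM_PGA_STRAT_NORMAL, 0)) == NULL) {
 *		mutex_exit(uobj->vmobjlock);
 *		uvm_wait("pgalloc");
 *		mutex_enter(uobj->vmobjlock);
 *	}
 */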
   1338 
   1339 /*
   1340  * uvm_pagereplace: replace a page with another
   1341  *
   1342  * => object must be locked
   1343  */
   1344 
   1345 void
   1346 uvm_pagereplace(struct vm_page *oldpg, struct vm_page *newpg)
   1347 {
   1348 	struct uvm_object *uobj = oldpg->uobject;
   1349 
   1350 	KASSERT((oldpg->flags & PG_TABLED) != 0);
   1351 	KASSERT(uobj != NULL);
   1352 	KASSERT((newpg->flags & PG_TABLED) == 0);
   1353 	KASSERT(newpg->uobject == NULL);
   1354 	KASSERT(mutex_owned(uobj->vmobjlock));
   1355 
   1356 	newpg->offset = oldpg->offset;
   1357 	uvm_pageremove_tree(uobj, oldpg);
   1358 	uvm_pageinsert_tree(uobj, newpg);
   1359 
   1360 	/* take page interlocks during rename */
   1361 	if (oldpg < newpg) {
   1362 		mutex_enter(&oldpg->interlock);
   1363 		mutex_enter(&newpg->interlock);
   1364 	} else {
   1365 		mutex_enter(&newpg->interlock);
   1366 		mutex_enter(&oldpg->interlock);
   1367 	}
   1368 	newpg->uobject = uobj;
   1369 	uvm_pageinsert_object(uobj, newpg);
   1370 	uvm_pageremove_object(uobj, oldpg);
   1371 	mutex_exit(&oldpg->interlock);
   1372 	mutex_exit(&newpg->interlock);
   1373 }
   1374 
   1375 /*
   1376  * uvm_pagerealloc: reallocate a page from one object to another
   1377  *
   1378  * => both objects must be locked
   1379  * => both interlocks must be held
   1380  */
   1381 
   1382 void
   1383 uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
   1384 {
   1385 	/*
   1386 	 * remove it from the old object
   1387 	 */
   1388 
   1389 	if (pg->uobject) {
   1390 		uvm_pageremove_tree(pg->uobject, pg);
   1391 		uvm_pageremove_object(pg->uobject, pg);
   1392 	}
   1393 
   1394 	/*
   1395 	 * put it in the new object
   1396 	 */
   1397 
   1398 	if (newobj) {
   1399 		/*
   1400 		 * XXX we have no in-tree users of this functionality
   1401 		 */
   1402 		panic("uvm_pagerealloc: no impl");
   1403 	}
   1404 }
   1405 
   1406 #ifdef DEBUG
   1407 /*
   1408  * check if page is zero-filled
   1409  */
   1410 void
   1411 uvm_pagezerocheck(struct vm_page *pg)
   1412 {
   1413 	int *p, *ep;
   1414 
   1415 	KASSERT(uvm_zerocheckkva != 0);
   1416 
   1417 	/*
   1418 	 * XXX assuming pmap_kenter_pa and pmap_kremove never call
   1419 	 * uvm page allocator.
   1420 	 *
   1421 	 * it might be better to have "CPU-local temporary map" pmap interface.
   1422 	 */
   1423 	pmap_kenter_pa(uvm_zerocheckkva, VM_PAGE_TO_PHYS(pg), VM_PROT_READ, 0);
   1424 	p = (int *)uvm_zerocheckkva;
   1425 	ep = (int *)((char *)p + PAGE_SIZE);
   1426 	pmap_update(pmap_kernel());
   1427 	while (p < ep) {
   1428 		if (*p != 0)
   1429 			panic("PG_ZERO page isn't zero-filled");
   1430 		p++;
   1431 	}
   1432 	pmap_kremove(uvm_zerocheckkva, PAGE_SIZE);
   1433 	/*
   1434 	 * pmap_update() is not necessary here because no one except us
   1435 	 * uses this VA.
   1436 	 */
   1437 }
   1438 #endif /* DEBUG */
   1439 
   1440 /*
   1441  * uvm_pagefree: free page
   1442  *
   1443  * => erase page's identity (i.e. remove from object)
   1444  * => put page on free list
   1445  * => caller must lock owning object (either anon or uvm_object)
   1446  * => assumes all valid mappings of pg are gone
   1447  */
   1448 
   1449 void
   1450 uvm_pagefree(struct vm_page *pg)
   1451 {
   1452 	struct pgfreelist *pgfl;
   1453 	struct pgflbucket *pgb;
   1454 	struct uvm_cpu *ucpu;
   1455 	kmutex_t *lock;
   1456 	int bucket, s;
   1457 	bool locked;
   1458 
   1459 #ifdef DEBUG
   1460 	if (pg->uobject == (void *)0xdeadbeef &&
   1461 	    pg->uanon == (void *)0xdeadbeef) {
   1462 		panic("uvm_pagefree: freeing free page %p", pg);
   1463 	}
   1464 #endif /* DEBUG */
   1465 
   1466 	KASSERT((pg->flags & PG_PAGEOUT) == 0);
   1467 	KASSERT(!(pg->flags & PG_FREE));
   1468 	KASSERT(pg->uobject == NULL || mutex_owned(pg->uobject->vmobjlock));
   1469 	KASSERT(pg->uobject != NULL || pg->uanon == NULL ||
   1470 		mutex_owned(pg->uanon->an_lock));
   1471 
   1472 	/*
    1473 	 * remove the page from the object's tree before acquiring any page
   1474 	 * interlocks: this can acquire locks to free radixtree nodes.
   1475 	 */
   1476 	if (pg->uobject != NULL) {
   1477 		uvm_pageremove_tree(pg->uobject, pg);
   1478 	}
   1479 
   1480 	/*
   1481 	 * if the page is loaned, resolve the loan instead of freeing.
   1482 	 */
   1483 
   1484 	if (pg->loan_count) {
   1485 		KASSERT(pg->wire_count == 0);
   1486 
   1487 		/*
   1488 		 * if the page is owned by an anon then we just want to
   1489 		 * drop anon ownership.  the kernel will free the page when
   1490 		 * it is done with it.  if the page is owned by an object,
   1491 		 * remove it from the object and mark it dirty for the benefit
   1492 		 * of possible anon owners.
   1493 		 *
   1494 		 * regardless of previous ownership, wakeup any waiters,
   1495 		 * unbusy the page, and we're done.
   1496 		 */
   1497 
   1498 		mutex_enter(&pg->interlock);
   1499 		locked = true;
   1500 		if (pg->uobject != NULL) {
   1501 			uvm_pageremove_object(pg->uobject, pg);
   1502 			pg->flags &= ~PG_CLEAN;
   1503 		} else if (pg->uanon != NULL) {
   1504 			if ((pg->flags & PG_ANON) == 0) {
   1505 				pg->loan_count--;
   1506 			} else {
   1507 				pg->flags &= ~PG_ANON;
   1508 				cpu_count(CPU_COUNT_ANONPAGES, -1);
   1509 			}
   1510 			pg->uanon->an_page = NULL;
   1511 			pg->uanon = NULL;
   1512 		}
   1513 		if (pg->flags & PG_WANTED) {
   1514 			wakeup(pg);
   1515 		}
   1516 		pg->flags &= ~(PG_WANTED|PG_BUSY|PG_RELEASED|PG_PAGER1);
   1517 #ifdef UVM_PAGE_TRKOWN
   1518 		pg->owner_tag = NULL;
   1519 #endif
   1520 		if (pg->loan_count) {
   1521 			KASSERT(pg->uobject == NULL);
   1522 			mutex_exit(&pg->interlock);
   1523 			if (pg->uanon == NULL) {
   1524 				uvm_pagedequeue(pg);
   1525 			}
   1526 			return;
   1527 		}
   1528 	} else if (pg->uobject != NULL || pg->uanon != NULL ||
   1529 	           pg->wire_count != 0) {
   1530 		mutex_enter(&pg->interlock);
   1531 		locked = true;
   1532 	} else {
   1533 		locked = false;
   1534 	}
   1535 
   1536 	/*
   1537 	 * remove page from its object or anon.
   1538 	 */
   1539 	if (pg->uobject != NULL) {
   1540 		uvm_pageremove_object(pg->uobject, pg);
   1541 	} else if (pg->uanon != NULL) {
   1542 		pg->uanon->an_page = NULL;
   1543 		pg->uanon = NULL;
   1544 		cpu_count(CPU_COUNT_ANONPAGES, -1);
   1545 	}
   1546 
   1547 	/*
   1548 	 * if the page was wired, unwire it now.
   1549 	 */
   1550 
   1551 	if (pg->wire_count) {
   1552 		pg->wire_count = 0;
   1553 		atomic_dec_uint(&uvmexp.wired);
   1554 	}
   1555 	if (locked) {
   1556 		mutex_exit(&pg->interlock);
   1557 	}
   1558 
   1559 	/*
   1560 	 * now remove the page from the queues.
   1561 	 */
   1562 	uvm_pagedequeue(pg);
   1563 
   1564 	/*
   1565 	 * and put on free queue
   1566 	 */
   1567 
   1568 #ifdef DEBUG
   1569 	pg->uobject = (void *)0xdeadbeef;
   1570 	pg->uanon = (void *)0xdeadbeef;
   1571 	if (pg->flags & PG_ZERO)
   1572 		uvm_pagezerocheck(pg);
   1573 #endif /* DEBUG */
   1574 
   1575 	s = splvm();
   1576 	ucpu = curcpu()->ci_data.cpu_uvm;
   1577 
   1578 	/*
   1579 	 * If we're using the NUMA strategy, we'll only cache this page if
   1580 	 * it came from the local CPU's NUMA node.  Otherwise we're using
   1581 	 * the L2/L3 cache locality strategy and we'll cache anything.
   1582 	 */
   1583 	if (uvm.numa_alloc) {
   1584 		bucket = uvm_page_get_bucket(pg);
   1585 	} else {
   1586 		bucket = ucpu->pgflbucket;
   1587 		uvm_page_set_bucket(pg, bucket);
   1588 	}
   1589 
   1590 	/* Try to send the page to the per-CPU cache. */
   1591 	if (bucket == ucpu->pgflbucket && uvm_pgflcache_free(ucpu, pg)) {
   1592 		splx(s);
   1593 		return;
   1594 	}
   1595 
   1596 	/* Didn't work.  Never mind, send it to a global bucket. */
   1597 	pgfl = &uvm.page_free[uvm_page_get_freelist(pg)];
   1598 	pgb = pgfl->pgfl_buckets[bucket];
   1599 	lock = &uvm_freelist_locks[bucket].lock;
   1600 
   1601 	mutex_spin_enter(lock);
   1602 	/* PG_FREE must be set under lock because of uvm_pglistalloc(). */
   1603 	pg->flags = (pg->flags & PG_ZERO) | PG_FREE;
   1604 	LIST_INSERT_HEAD(&pgb->pgb_colors[VM_PGCOLOR(pg)], pg, pageq.list);
   1605 	pgb->pgb_nfree++;
   1606 	mutex_spin_exit(lock);
   1607 	splx(s);
   1608 }
   1609 
   1610 /*
   1611  * uvm_page_unbusy: unbusy an array of pages.
   1612  *
   1613  * => pages must either all belong to the same object, or all belong to anons.
   1614  * => if pages are object-owned, object must be locked.
   1615  * => if pages are anon-owned, anons must be locked.
   1616  * => caller must make sure that anon-owned pages are not PG_RELEASED.
   1617  */
   1618 
   1619 void
   1620 uvm_page_unbusy(struct vm_page **pgs, int npgs)
   1621 {
   1622 	struct vm_page *pg;
   1623 	int i;
   1624 	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);
   1625 
   1626 	for (i = 0; i < npgs; i++) {
   1627 		pg = pgs[i];
   1628 		if (pg == NULL || pg == PGO_DONTCARE) {
   1629 			continue;
   1630 		}
   1631 
   1632 		KASSERT(uvm_page_locked_p(pg));
   1633 		KASSERT(pg->flags & PG_BUSY);
   1634 		KASSERT((pg->flags & PG_PAGEOUT) == 0);
   1635 		if (pg->flags & PG_WANTED) {
   1636 			/* XXXAD thundering herd problem. */
   1637 			wakeup(pg);
   1638 		}
   1639 		if (pg->flags & PG_RELEASED) {
   1640 			UVMHIST_LOG(ubchist, "releasing pg %#jx",
   1641 			    (uintptr_t)pg, 0, 0, 0);
   1642 			KASSERT(pg->uobject != NULL ||
   1643 			    (pg->uanon != NULL && pg->uanon->an_ref > 0));
   1644 			pg->flags &= ~PG_RELEASED;
   1645 			uvm_pagefree(pg);
   1646 		} else {
   1647 			UVMHIST_LOG(ubchist, "unbusying pg %#jx",
   1648 			    (uintptr_t)pg, 0, 0, 0);
   1649 			KASSERT((pg->flags & PG_FAKE) == 0);
   1650 			pg->flags &= ~(PG_WANTED|PG_BUSY);
   1651 			UVM_PAGE_OWN(pg, NULL);
   1652 		}
   1653 	}
   1654 }
   1655 
   1656 #if defined(UVM_PAGE_TRKOWN)
   1657 /*
   1658  * uvm_page_own: set or release page ownership
   1659  *
   1660  * => this is a debugging function that keeps track of who sets PG_BUSY
   1661  *	and where they do it.   it can be used to track down problems
    1662  *	such as a process setting "PG_BUSY" and never releasing it.
   1663  * => page's object [if any] must be locked
   1664  * => if "tag" is NULL then we are releasing page ownership
   1665  */
   1666 void
   1667 uvm_page_own(struct vm_page *pg, const char *tag)
   1668 {
   1669 
   1670 	KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
   1671 	KASSERT((pg->flags & PG_WANTED) == 0);
   1672 	KASSERT(uvm_page_locked_p(pg));
   1673 
   1674 	/* gain ownership? */
   1675 	if (tag) {
   1676 		KASSERT((pg->flags & PG_BUSY) != 0);
   1677 		if (pg->owner_tag) {
   1678 			printf("uvm_page_own: page %p already owned "
   1679 			    "by proc %d [%s]\n", pg,
   1680 			    pg->owner, pg->owner_tag);
   1681 			panic("uvm_page_own");
   1682 		}
   1683 		pg->owner = curproc->p_pid;
   1684 		pg->lowner = curlwp->l_lid;
   1685 		pg->owner_tag = tag;
   1686 		return;
   1687 	}
   1688 
   1689 	/* drop ownership */
   1690 	KASSERT((pg->flags & PG_BUSY) == 0);
   1691 	if (pg->owner_tag == NULL) {
    1692 		printf("uvm_page_own: dropping ownership of a non-owned "
   1693 		    "page (%p)\n", pg);
   1694 		panic("uvm_page_own");
   1695 	}
   1696 	pg->owner_tag = NULL;
   1697 }
   1698 #endif
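
/*
 * Illustrative sketch (not part of the original source): code that sets
 * PG_BUSY typically records ownership through the UVM_PAGE_OWN() macro
 * (which expands to uvm_page_own() only when UVM_PAGE_TRKOWN is defined)
 * and clears it again when the busy flag is dropped.  The names below are
 * hypothetical.
 */
#if 0	/* example only */
static void
example_busy_page(struct vm_page *pg)
{

	pg->flags |= PG_BUSY;
	UVM_PAGE_OWN(pg, "example_busy");
	/* ... operate on the busy page ... */
	pg->flags &= ~PG_BUSY;
	UVM_PAGE_OWN(pg, NULL);
}
#endif	/* example only */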
   1699 
   1700 /*
   1701  * uvm_pageidlezero: zero free pages while the system is idle.
   1702  */
   1703 void
   1704 uvm_pageidlezero(void)
   1705 {
   1706 
   1707 	/*
    1708 	 * Disabled for the moment.  The previous strategy was too cache
    1709 	 * heavy.  In the future we may experiment with zeroing the pages
    1710 	 * held in the per-CPU cache (uvm_pgflcache).
   1711 	 */
   1712 }
   1713 
   1714 /*
   1715  * uvm_pagelookup: look up a page
   1716  *
   1717  * => caller should lock object to keep someone from pulling the page
   1718  *	out from under it
   1719  */
   1720 
   1721 struct vm_page *
   1722 uvm_pagelookup(struct uvm_object *obj, voff_t off)
   1723 {
   1724 	struct vm_page *pg;
   1725 
   1726 	/* No - used from DDB. KASSERT(mutex_owned(obj->vmobjlock)); */
   1727 
   1728 	pg = radix_tree_lookup_node(&obj->uo_pages, off >> PAGE_SHIFT);
   1729 
   1730 	KASSERT(pg == NULL || obj->uo_npages != 0);
   1731 	KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1732 		(pg->flags & PG_BUSY) != 0);
   1733 	return pg;
   1734 }
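
/*
 * Illustrative sketch (not part of the original source): looking up a page
 * with the object locked so it cannot be freed or replaced underneath the
 * caller.  The names below are hypothetical.
 */
#if 0	/* example only */
static void
example_lookup(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	mutex_enter(uobj->vmobjlock);
	pg = uvm_pagelookup(uobj, off);
	if (pg != NULL) {
		/* ... use the page while the object remains locked ... */
	}
	mutex_exit(uobj->vmobjlock);
}
#endif	/* example only */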
   1735 
   1736 /*
   1737  * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
   1738  *
   1739  * => caller must lock objects
   1740  */
   1741 
   1742 void
   1743 uvm_pagewire(struct vm_page *pg)
   1744 {
   1745 
   1746 	KASSERT(uvm_page_locked_p(pg));
   1747 #if defined(READAHEAD_STATS)
   1748 	if ((pg->flags & PG_READAHEAD) != 0) {
   1749 		uvm_ra_hit.ev_count++;
   1750 		pg->flags &= ~PG_READAHEAD;
   1751 	}
   1752 #endif /* defined(READAHEAD_STATS) */
   1753 	if (pg->wire_count == 0) {
   1754 		uvm_pagedequeue(pg);
   1755 		atomic_inc_uint(&uvmexp.wired);
   1756 	}
   1757 	mutex_enter(&pg->interlock);
   1758 	pg->wire_count++;
   1759 	mutex_exit(&pg->interlock);
   1760 	KASSERT(pg->wire_count > 0);	/* detect wraparound */
   1761 }
   1762 
   1763 /*
   1764  * uvm_pageunwire: unwire the page.
   1765  *
   1766  * => activate if wire count goes to zero.
   1767  * => caller must lock objects
   1768  */
   1769 
   1770 void
   1771 uvm_pageunwire(struct vm_page *pg)
   1772 {
   1773 
   1774 	KASSERT(uvm_page_locked_p(pg));
   1775 	KASSERT(pg->wire_count != 0);
   1776 	KASSERT(!uvmpdpol_pageisqueued_p(pg));
   1777 	mutex_enter(&pg->interlock);
   1778 	pg->wire_count--;
   1779 	mutex_exit(&pg->interlock);
   1780 	if (pg->wire_count == 0) {
   1781 		uvm_pageactivate(pg);
   1782 		KASSERT(uvmexp.wired != 0);
   1783 		atomic_dec_uint(&uvmexp.wired);
   1784 	}
   1785 }
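
/*
 * Illustrative sketch (not part of the original source): wiring a page for
 * the duration of an operation and unwiring it afterwards, with the owning
 * object locked across each call as required above.  The names below are
 * hypothetical.
 */
#if 0	/* example only */
static void
example_wired_access(struct uvm_object *uobj, struct vm_page *pg)
{

	mutex_enter(uobj->vmobjlock);
	uvm_pagewire(pg);		/* no longer eligible for paging */
	mutex_exit(uobj->vmobjlock);

	/* ... access the page without fear of it being paged out ... */

	mutex_enter(uobj->vmobjlock);
	uvm_pageunwire(pg);		/* reactivated once count reaches 0 */
	mutex_exit(uobj->vmobjlock);
}
#endif	/* example only */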
   1786 
   1787 /*
   1788  * uvm_pagedeactivate: deactivate page
   1789  *
   1790  * => caller must lock objects
   1791  * => caller must check to make sure page is not wired
   1792  * => object that page belongs to must be locked (so we can adjust pg->flags)
   1793  * => caller must clear the reference on the page before calling
   1794  */
   1795 
   1796 void
   1797 uvm_pagedeactivate(struct vm_page *pg)
   1798 {
   1799 
   1800 	KASSERT(uvm_page_locked_p(pg));
   1801 	if (pg->wire_count == 0) {
   1802 		KASSERT(uvmpdpol_pageisqueued_p(pg));
   1803 		uvmpdpol_pagedeactivate(pg);
   1804 	}
   1805 }
   1806 
   1807 /*
   1808  * uvm_pageactivate: activate page
   1809  *
   1810  * => caller must lock objects
   1811  */
   1812 
   1813 void
   1814 uvm_pageactivate(struct vm_page *pg)
   1815 {
   1816 
   1817 	KASSERT(uvm_page_locked_p(pg));
   1818 #if defined(READAHEAD_STATS)
   1819 	if ((pg->flags & PG_READAHEAD) != 0) {
   1820 		uvm_ra_hit.ev_count++;
   1821 		pg->flags &= ~PG_READAHEAD;
   1822 	}
   1823 #endif /* defined(READAHEAD_STATS) */
   1824 	if (pg->wire_count == 0) {
   1825 		uvmpdpol_pageactivate(pg);
   1826 	}
   1827 }
   1828 
   1829 /*
   1830  * uvm_pagedequeue: remove a page from any paging queue
   1831  *
   1832  * => caller must lock objects
   1833  */
   1834 void
   1835 uvm_pagedequeue(struct vm_page *pg)
   1836 {
   1837 
   1838 	KASSERT(uvm_page_locked_p(pg));
   1839 	if (uvmpdpol_pageisqueued_p(pg)) {
   1840 		uvmpdpol_pagedequeue(pg);
   1841 	}
   1842 }
   1843 
   1844 /*
   1845  * uvm_pageenqueue: add a page to a paging queue without activating.
    1846  * used where a page is not really demanded (yet), e.g. read-ahead.
   1847  *
   1848  * => caller must lock objects
   1849  */
   1850 void
   1851 uvm_pageenqueue(struct vm_page *pg)
   1852 {
   1853 
   1854 	KASSERT(uvm_page_locked_p(pg));
   1855 	if (pg->wire_count == 0 && !uvmpdpol_pageisqueued_p(pg)) {
   1856 		uvmpdpol_pageenqueue(pg);
   1857 	}
   1858 }
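
/*
 * Illustrative sketch (not part of the original source): read-ahead code
 * would enqueue a speculatively read page without activating it, while a
 * fault handler that actually touches the page would activate it instead.
 * The page's object is assumed to be locked in both cases.
 */
#if 0	/* example only */
	/* page read speculatively: put it on a paging queue, not active */
	uvm_pageenqueue(pg);

	/* page actually demanded by a fault: mark it active */
	uvm_pageactivate(pg);
#endif	/* example only */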
   1859 
   1860 /*
   1861  * uvm_pagezero: zero fill a page
   1862  *
   1863  * => if page is part of an object then the object should be locked
   1864  *	to protect pg->flags.
   1865  */
   1866 
   1867 void
   1868 uvm_pagezero(struct vm_page *pg)
   1869 {
   1870 	pg->flags &= ~PG_CLEAN;
   1871 	pmap_zero_page(VM_PAGE_TO_PHYS(pg));
   1872 }
   1873 
   1874 /*
   1875  * uvm_pagecopy: copy a page
   1876  *
   1877  * => if page is part of an object then the object should be locked
   1878  *	to protect pg->flags.
   1879  */
   1880 
   1881 void
   1882 uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
   1883 {
   1884 
   1885 	dst->flags &= ~PG_CLEAN;
   1886 	pmap_copy_page(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
   1887 }
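
/*
 * Illustrative sketch (not part of the original source): a copy-on-write
 * style resolution allocates a fresh page and copies the original into it;
 * uvm_pagecopy() marks the destination dirty by clearing PG_CLEAN.  The
 * names below are hypothetical and error handling is omitted.
 */
#if 0	/* example only */
static struct vm_page *
example_copy_page(struct uvm_object *uobj, voff_t off, struct vm_page *opg)
{
	struct vm_page *npg;

	npg = uvm_pagealloc(uobj, off, NULL, 0);
	if (npg != NULL)
		uvm_pagecopy(opg, npg);
	return npg;
}
#endif	/* example only */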
   1888 
   1889 /*
    1890  * uvm_pageismanaged: test whether a page (specified by PA) is managed.
   1891  */
   1892 
   1893 bool
   1894 uvm_pageismanaged(paddr_t pa)
   1895 {
   1896 
   1897 	return (uvm_physseg_find(atop(pa), NULL) != UVM_PHYSSEG_TYPE_INVALID);
   1898 }
   1899 
   1900 /*
   1901  * uvm_page_lookup_freelist: look up the free list for the specified page
   1902  */
   1903 
   1904 int
   1905 uvm_page_lookup_freelist(struct vm_page *pg)
   1906 {
   1907 	uvm_physseg_t upm;
   1908 
   1909 	upm = uvm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
   1910 	KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID);
   1911 	return uvm_physseg_get_free_list(upm);
   1912 }
   1913 
   1914 /*
   1915  * uvm_page_locked_p: return true if object associated with page is
   1916  * locked.  this is a weak check for runtime assertions only.
   1917  */
   1918 
   1919 bool
   1920 uvm_page_locked_p(struct vm_page *pg)
   1921 {
   1922 
   1923 	if (pg->uobject != NULL) {
   1924 		return mutex_owned(pg->uobject->vmobjlock);
   1925 	}
   1926 	if (pg->uanon != NULL) {
   1927 		return mutex_owned(pg->uanon->an_lock);
   1928 	}
   1929 	return true;
   1930 }
   1931 
   1932 #ifdef PMAP_DIRECT
   1933 /*
    1934  * Call pmap to translate a physical address into a virtual one and run a
    1935  * callback on it.  Used to avoid actually mapping the pages; the pmap most
    1936  * likely uses a direct map or equivalent.
   1937  */
   1938 int
   1939 uvm_direct_process(struct vm_page **pgs, u_int npages, voff_t off, vsize_t len,
   1940             int (*process)(void *, size_t, void *), void *arg)
   1941 {
   1942 	int error = 0;
   1943 	paddr_t pa;
   1944 	size_t todo;
   1945 	voff_t pgoff = (off & PAGE_MASK);
   1946 	struct vm_page *pg;
   1947 
   1948 	KASSERT(npages > 0 && len > 0);
   1949 
   1950 	for (int i = 0; i < npages; i++) {
   1951 		pg = pgs[i];
   1952 
   1953 		KASSERT(len > 0);
   1954 
   1955 		/*
   1956 		 * Caller is responsible for ensuring all the pages are
   1957 		 * available.
   1958 		 */
   1959 		KASSERT(pg != NULL && pg != PGO_DONTCARE);
   1960 
   1961 		pa = VM_PAGE_TO_PHYS(pg);
   1962 		todo = MIN(len, PAGE_SIZE - pgoff);
   1963 
   1964 		error = pmap_direct_process(pa, pgoff, todo, process, arg);
   1965 		if (error)
   1966 			break;
   1967 
   1968 		pgoff = 0;
   1969 		len -= todo;
   1970 	}
   1971 
   1972 	KASSERTMSG(error != 0 || len == 0, "len %lu != 0 for non-error", len);
   1973 	return error;
   1974 }
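
/*
 * Illustrative sketch (not part of the original source): a caller of
 * uvm_direct_process() supplies a callback that consumes each directly
 * mapped chunk; here each chunk is simply copied into a buffer passed
 * through "arg".  The names below are hypothetical.
 */
#if 0	/* example only */
static int
example_copyout_cb(void *va, size_t len, void *arg)
{
	char **bufp = arg;

	memcpy(*bufp, va, len);
	*bufp += len;
	return 0;
}

static int
example_copyout(struct vm_page **pgs, u_int npages, voff_t off, vsize_t len,
    char *buf)
{

	/* copy "len" bytes starting at offset "off" into "buf" */
	return uvm_direct_process(pgs, npages, off, len,
	    example_copyout_cb, &buf);
}
#endif	/* example only */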
   1975 #endif /* PMAP_DIRECT */
   1976 
   1977 #if defined(DDB) || defined(DEBUGPRINT)
   1978 
   1979 /*
   1980  * uvm_page_printit: actually print the page
   1981  */
   1982 
   1983 static const char page_flagbits[] = UVM_PGFLAGBITS;
   1984 
   1985 void
   1986 uvm_page_printit(struct vm_page *pg, bool full,
   1987     void (*pr)(const char *, ...))
   1988 {
   1989 	struct vm_page *tpg;
   1990 	struct uvm_object *uobj;
   1991 	struct pgflbucket *pgb;
   1992 	struct pgflist *pgl;
   1993 	char pgbuf[128];
   1994 
   1995 	(*pr)("PAGE %p:\n", pg);
   1996 	snprintb(pgbuf, sizeof(pgbuf), page_flagbits, pg->flags);
   1997 	(*pr)("  flags=%s, pqflags=%x, wire_count=%d, pa=0x%lx\n",
   1998 	    pgbuf, pg->pqflags, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg));
   1999 	(*pr)("  uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
   2000 	    pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
   2001 	(*pr)("  bucket=%d freelist=%d\n",
   2002 	    uvm_page_get_bucket(pg), uvm_page_get_freelist(pg));
   2003 #if defined(UVM_PAGE_TRKOWN)
   2004 	if (pg->flags & PG_BUSY)
   2005 		(*pr)("  owning process = %d, tag=%s\n",
   2006 		    pg->owner, pg->owner_tag);
   2007 	else
   2008 		(*pr)("  page not busy, no owner\n");
   2009 #else
   2010 	(*pr)("  [page ownership tracking disabled]\n");
   2011 #endif
   2012 
   2013 	if (!full)
   2014 		return;
   2015 
   2016 	/* cross-verify object/anon */
   2017 	if ((pg->flags & PG_FREE) == 0) {
   2018 		if (pg->flags & PG_ANON) {
   2019 			if (pg->uanon == NULL || pg->uanon->an_page != pg)
   2020 			    (*pr)("  >>> ANON DOES NOT POINT HERE <<< (%p)\n",
   2021 				(pg->uanon) ? pg->uanon->an_page : NULL);
   2022 			else
   2023 				(*pr)("  anon backpointer is OK\n");
   2024 		} else {
   2025 			uobj = pg->uobject;
   2026 			if (uobj) {
   2027 				(*pr)("  checking object list\n");
   2028 				tpg = uvm_pagelookup(uobj, pg->offset);
   2029 				if (tpg)
   2030 					(*pr)("  page found on object list\n");
   2031 				else
   2032 			(*pr)("  >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
   2033 			}
   2034 		}
   2035 	}
   2036 
   2037 	/* cross-verify page queue */
   2038 	if (pg->flags & PG_FREE) {
   2039 		int fl = uvm_page_get_freelist(pg);
   2040 		int b = uvm_page_get_bucket(pg);
   2041 		pgb = uvm.page_free[fl].pgfl_buckets[b];
   2042 		pgl = &pgb->pgb_colors[VM_PGCOLOR(pg)];
   2043 		(*pr)("  checking pageq list\n");
   2044 		LIST_FOREACH(tpg, pgl, pageq.list) {
   2045 			if (tpg == pg) {
   2046 				break;
   2047 			}
   2048 		}
   2049 		if (tpg)
   2050 			(*pr)("  page found on pageq list\n");
   2051 		else
   2052 			(*pr)("  >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
   2053 	}
   2054 }
   2055 
   2056 /*
   2057  * uvm_page_printall - print a summary of all managed pages
   2058  */
   2059 
   2060 void
   2061 uvm_page_printall(void (*pr)(const char *, ...))
   2062 {
   2063 	uvm_physseg_t i;
   2064 	paddr_t pfn;
   2065 	struct vm_page *pg;
   2066 
   2067 	(*pr)("%18s %4s %4s %18s %18s"
   2068 #ifdef UVM_PAGE_TRKOWN
   2069 	    " OWNER"
   2070 #endif
   2071 	    "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
   2072 	for (i = uvm_physseg_get_first();
   2073 	     uvm_physseg_valid_p(i);
   2074 	     i = uvm_physseg_get_next(i)) {
   2075 		for (pfn = uvm_physseg_get_start(i);
   2076 		     pfn < uvm_physseg_get_end(i);
   2077 		     pfn++) {
   2078 			pg = PHYS_TO_VM_PAGE(ptoa(pfn));
   2079 
   2080 			(*pr)("%18p %04x %08x %18p %18p",
   2081 			    pg, pg->flags, pg->pqflags, pg->uobject,
   2082 			    pg->uanon);
   2083 #ifdef UVM_PAGE_TRKOWN
   2084 			if (pg->flags & PG_BUSY)
   2085 				(*pr)(" %d [%s]", pg->owner, pg->owner_tag);
   2086 #endif
   2087 			(*pr)("\n");
   2088 		}
   2089 	}
   2090 }
   2091 
   2092 /*
    2093  * uvm_page_print_freelists - print a summary of the freelists
   2094  */
   2095 
   2096 void
   2097 uvm_page_print_freelists(void (*pr)(const char *, ...))
   2098 {
   2099 	struct pgfreelist *pgfl;
   2100 	struct pgflbucket *pgb;
   2101 	int fl, b, c;
   2102 
   2103 	(*pr)("There are %d freelists with %d buckets of %d colors.\n\n",
   2104 	    VM_NFREELIST, uvm.bucketcount, uvmexp.ncolors);
   2105 
   2106 	for (fl = 0; fl < VM_NFREELIST; fl++) {
   2107 		pgfl = &uvm.page_free[fl];
   2108 		(*pr)("freelist(%d) @ %p\n", fl, pgfl);
   2109 		for (b = 0; b < uvm.bucketcount; b++) {
   2110 			pgb = uvm.page_free[fl].pgfl_buckets[b];
   2111 			(*pr)("    bucket(%d) @ %p, nfree = %d, lock @ %p:\n",
   2112 			    b, pgb, pgb->pgb_nfree,
   2113 			    &uvm_freelist_locks[b].lock);
   2114 			for (c = 0; c < uvmexp.ncolors; c++) {
   2115 				(*pr)("        color(%d) @ %p, ", c,
   2116 				    &pgb->pgb_colors[c]);
   2117 				(*pr)("first page = %p\n",
   2118 				    LIST_FIRST(&pgb->pgb_colors[c]));
   2119 			}
   2120 		}
   2121 	}
   2122 }
   2123 
   2124 #endif /* DDB || DEBUGPRINT */
   2125