      1 /*	$NetBSD: uvm_page.c,v 1.225 2020/01/21 20:37:06 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Copyright (c) 1997 Charles D. Cranor and Washington University.
     34  * Copyright (c) 1991, 1993, The Regents of the University of California.
     35  *
     36  * All rights reserved.
     37  *
     38  * This code is derived from software contributed to Berkeley by
     39  * The Mach Operating System project at Carnegie-Mellon University.
     40  *
     41  * Redistribution and use in source and binary forms, with or without
     42  * modification, are permitted provided that the following conditions
     43  * are met:
     44  * 1. Redistributions of source code must retain the above copyright
     45  *    notice, this list of conditions and the following disclaimer.
     46  * 2. Redistributions in binary form must reproduce the above copyright
     47  *    notice, this list of conditions and the following disclaimer in the
     48  *    documentation and/or other materials provided with the distribution.
     49  * 3. Neither the name of the University nor the names of its contributors
     50  *    may be used to endorse or promote products derived from this software
     51  *    without specific prior written permission.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     63  * SUCH DAMAGE.
     64  *
     65  *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
     66  * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
     67  *
     68  *
     69  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
     70  * All rights reserved.
     71  *
     72  * Permission to use, copy, modify and distribute this software and
     73  * its documentation is hereby granted, provided that both the copyright
     74  * notice and this permission notice appear in all copies of the
     75  * software, derivative works or modified versions, and any portions
     76  * thereof, and that both notices appear in supporting documentation.
     77  *
     78  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     79  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     80  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     81  *
     82  * Carnegie Mellon requests users of this software to return to
     83  *
      84  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     85  *  School of Computer Science
     86  *  Carnegie Mellon University
     87  *  Pittsburgh PA 15213-3890
     88  *
     89  * any improvements or extensions that they make and grant Carnegie the
     90  * rights to redistribute these changes.
     91  */
     92 
     93 /*
     94  * uvm_page.c: page ops.
     95  */
     96 
     97 #include <sys/cdefs.h>
     98 __KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.225 2020/01/21 20:37:06 ad Exp $");
     99 
    100 #include "opt_ddb.h"
    101 #include "opt_uvm.h"
    102 #include "opt_uvmhist.h"
    103 #include "opt_readahead.h"
    104 
    105 #include <sys/param.h>
    106 #include <sys/systm.h>
    107 #include <sys/sched.h>
    108 #include <sys/kernel.h>
    109 #include <sys/vnode.h>
    110 #include <sys/proc.h>
    111 #include <sys/radixtree.h>
    112 #include <sys/atomic.h>
    113 #include <sys/cpu.h>
    114 #include <sys/extent.h>
    115 
    116 #include <uvm/uvm.h>
    117 #include <uvm/uvm_ddb.h>
    118 #include <uvm/uvm_pdpolicy.h>
    119 #include <uvm/uvm_pgflcache.h>
    120 
    121 /*
    122  * Some supported CPUs in a given architecture don't support all
    123  * of the things necessary to do idle page zero'ing efficiently.
    124  * We therefore provide a way to enable it from machdep code here.
    125  */
    126 bool vm_page_zero_enable = false;
    127 
    128 /*
    129  * number of pages per-CPU to reserve for the kernel.
    130  */
    131 #ifndef	UVM_RESERVED_PAGES_PER_CPU
    132 #define	UVM_RESERVED_PAGES_PER_CPU	5
    133 #endif
    134 int vm_page_reserve_kernel = UVM_RESERVED_PAGES_PER_CPU;
    135 
    136 /*
    137  * physical memory size;
    138  */
    139 psize_t physmem;
    140 
    141 /*
    142  * local variables
    143  */
    144 
    145 /*
    146  * these variables record the values returned by vm_page_bootstrap,
    147  * for debugging purposes.  The implementation of uvm_pageboot_alloc
    148  * and pmap_startup here also uses them internally.
    149  */
    150 
    151 static vaddr_t      virtual_space_start;
    152 static vaddr_t      virtual_space_end;
    153 
    154 /*
    155  * we allocate an initial number of page colors in uvm_page_init(),
    156  * and remember them.  We may re-color pages as cache sizes are
    157  * discovered during the autoconfiguration phase.  But we can never
    158  * free the initial set of buckets, since they are allocated using
    159  * uvm_pageboot_alloc().
    160  */
    161 
    162 static size_t recolored_pages_memsize /* = 0 */;
    163 static char *recolored_pages_mem;
    164 
    165 /*
    166  * freelist locks - one per bucket.
    167  */
    168 
    169 union uvm_freelist_lock	uvm_freelist_locks[PGFL_MAX_BUCKETS]
    170     __cacheline_aligned;
    171 
    172 /*
    173  * basic NUMA information.
    174  */
    175 
    176 static struct uvm_page_numa_region {
    177 	struct uvm_page_numa_region	*next;
    178 	paddr_t				start;
    179 	paddr_t				size;
    180 	u_int				numa_id;
    181 } *uvm_page_numa_region;
    182 
    183 #ifdef DEBUG
    184 kmutex_t uvm_zerochecklock __cacheline_aligned;
    185 vaddr_t uvm_zerocheckkva;
    186 #endif /* DEBUG */
    187 
    188 /*
    189  * These functions are reserved for uvm(9) internal use and are not
    190  * exported in the header file uvm_physseg.h
    191  *
    192  * Thus they are redefined here.
    193  */
    194 void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
    195 void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);
    196 
    197 /* returns a pgs array */
    198 struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);
    199 
    200 /*
    201  * inline functions
    202  */
    203 
    204 /*
    205  * uvm_pageinsert: insert a page in the object.
    206  *
    207  * => caller must lock object
    208  * => call should have already set pg's object and offset pointers
    209  *    and bumped the version counter
    210  */
    211 
    212 static inline void
    213 uvm_pageinsert_object(struct uvm_object *uobj, struct vm_page *pg)
    214 {
    215 
    216 	KASSERT(uobj == pg->uobject);
    217 	KASSERT(mutex_owned(uobj->vmobjlock));
    218 	KASSERT((pg->flags & PG_TABLED) == 0);
    219 
    220 	if ((pg->flags & PG_STAT) != 0) {
    221 		/* Cannot use uvm_pagegetdirty(): not yet in radix tree. */
    222 		const unsigned int status = pg->flags & (PG_CLEAN | PG_DIRTY);
    223 		const bool isaobj = (pg->flags & PG_AOBJ) != 0;
    224 
    225 		if (!isaobj) {
    226 			KASSERT((pg->flags & PG_FILE) != 0);
    227 			if (uobj->uo_npages == 0) {
    228 				struct vnode *vp = (struct vnode *)uobj;
    229 
    230 				vholdl(vp);
    231 			}
    232 			kpreempt_disable();
    233 			if (UVM_OBJ_IS_VTEXT(uobj)) {
    234 				CPU_COUNT(CPU_COUNT_EXECPAGES, 1);
    235 			} else {
    236 				CPU_COUNT(CPU_COUNT_FILEPAGES, 1);
    237 			}
    238 			CPU_COUNT(CPU_COUNT_FILEUNKNOWN + status, 1);
    239 		} else {
    240 			kpreempt_disable();
    241 			CPU_COUNT(CPU_COUNT_ANONPAGES, 1);
    242 			CPU_COUNT(CPU_COUNT_ANONUNKNOWN + status, 1);
    243 		}
    244 		kpreempt_enable();
    245 	}
    246 	pg->flags |= PG_TABLED;
    247 	uobj->uo_npages++;
    248 }
    249 
    250 static inline int
    251 uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
    252 {
    253 	const uint64_t idx = pg->offset >> PAGE_SHIFT;
    254 	int error;
    255 
    256 	error = radix_tree_insert_node(&uobj->uo_pages, idx, pg);
    257 	if (error != 0) {
    258 		return error;
    259 	}
    260 	if ((pg->flags & PG_CLEAN) == 0) {
    261 		radix_tree_set_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG);
    262 	}
    263 	KASSERT(((pg->flags & PG_CLEAN) == 0) ==
    264 	    radix_tree_get_tag(&uobj->uo_pages, idx, UVM_PAGE_DIRTY_TAG));
    265 	return 0;
    266 }
    267 
    268 /*
    269  * uvm_page_remove: remove page from object.
    270  *
    271  * => caller must lock object
    272  */
    273 
    274 static inline void
    275 uvm_pageremove_object(struct uvm_object *uobj, struct vm_page *pg)
    276 {
    277 
    278 	KASSERT(uobj == pg->uobject);
    279 	KASSERT(mutex_owned(uobj->vmobjlock));
    280 	KASSERT(pg->flags & PG_TABLED);
    281 
    282 	if ((pg->flags & PG_STAT) != 0) {
    283 		/* Cannot use uvm_pagegetdirty(): no longer in radix tree. */
    284 		const unsigned int status = pg->flags & (PG_CLEAN | PG_DIRTY);
    285 		const bool isaobj = (pg->flags & PG_AOBJ) != 0;
    286 
    287 		if (!isaobj) {
    288 			KASSERT((pg->flags & PG_FILE) != 0);
    289 			if (uobj->uo_npages == 1) {
    290 				struct vnode *vp = (struct vnode *)uobj;
    291 
    292 				holdrelel(vp);
    293 			}
    294 			kpreempt_disable();
    295 			if (UVM_OBJ_IS_VTEXT(uobj)) {
    296 				CPU_COUNT(CPU_COUNT_EXECPAGES, -1);
    297 			} else {
    298 				CPU_COUNT(CPU_COUNT_FILEPAGES, -1);
    299 			}
    300 			CPU_COUNT(CPU_COUNT_FILEUNKNOWN + status, -1);
    301 		} else {
    302 			kpreempt_disable();
    303 			CPU_COUNT(CPU_COUNT_ANONPAGES, -1);
    304 			CPU_COUNT(CPU_COUNT_ANONUNKNOWN + status, -1);
    305 		}
    306 		kpreempt_enable();
    307 	}
    308 	uobj->uo_npages--;
    309 	pg->flags &= ~PG_TABLED;
    310 	pg->uobject = NULL;
    311 }
    312 
    313 static inline void
    314 uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg)
    315 {
    316 	struct vm_page *opg __unused;
    317 
    318 	opg = radix_tree_remove_node(&uobj->uo_pages, pg->offset >> PAGE_SHIFT);
    319 	KASSERT(pg == opg);
    320 }
    321 
    322 static void
    323 uvm_page_init_bucket(struct pgfreelist *pgfl, struct pgflbucket *pgb, int num)
    324 {
    325 	int i;
    326 
    327 	pgb->pgb_nfree = 0;
    328 	for (i = 0; i < uvmexp.ncolors; i++) {
    329 		LIST_INIT(&pgb->pgb_colors[i]);
    330 	}
    331 	pgfl->pgfl_buckets[num] = pgb;
    332 }
    333 
    334 /*
    335  * uvm_page_init: init the page system.   called from uvm_init().
    336  *
    337  * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
    338  */
    339 
    340 void
    341 uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
    342 {
    343 	static struct uvm_cpu boot_cpu __cacheline_aligned;
    344 	psize_t freepages, pagecount, bucketsize, n;
    345 	struct pgflbucket *pgb;
    346 	struct vm_page *pagearray;
    347 	char *bucketarray;
    348 	uvm_physseg_t bank;
    349 	int fl, b;
    350 
    351 	KASSERT(ncpu <= 1);
    352 
    353 	/*
    354 	 * init the page queues and free page queue locks, except the
    355 	 * free list; we allocate that later (with the initial vm_page
    356 	 * structures).
    357 	 */
    358 
    359 	curcpu()->ci_data.cpu_uvm = &boot_cpu;
    360 	uvmpdpol_init();
    361 	for (b = 0; b < __arraycount(uvm_freelist_locks); b++) {
    362 		mutex_init(&uvm_freelist_locks[b].lock, MUTEX_DEFAULT, IPL_VM);
    363 	}
    364 
    365 	/*
    366 	 * allocate vm_page structures.
    367 	 */
    368 
    369 	/*
    370 	 * sanity check:
    371 	 * before calling this function the MD code is expected to register
    372 	 * some free RAM with the uvm_page_physload() function.   our job
    373 	 * now is to allocate vm_page structures for this memory.
    374 	 */
    375 
    376 	if (uvm_physseg_get_last() == UVM_PHYSSEG_TYPE_INVALID)
    377 		panic("uvm_page_bootstrap: no memory pre-allocated");
    378 
    379 	/*
    380 	 * first calculate the number of free pages...
    381 	 *
    382 	 * note that we use start/end rather than avail_start/avail_end.
    383 	 * this allows us to allocate extra vm_page structures in case we
    384 	 * want to return some memory to the pool after booting.
    385 	 */
    386 
    387 	freepages = 0;
    388 
    389 	for (bank = uvm_physseg_get_first();
    390 	     uvm_physseg_valid_p(bank) ;
    391 	     bank = uvm_physseg_get_next(bank)) {
    392 		freepages += (uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank));
    393 	}
    394 
    395 	/*
    396 	 * Let MD code initialize the number of colors, or default
    397 	 * to 1 color if MD code doesn't care.
    398 	 */
    399 	if (uvmexp.ncolors == 0)
    400 		uvmexp.ncolors = 1;
    401 	uvmexp.colormask = uvmexp.ncolors - 1;
    402 	KASSERT((uvmexp.colormask & uvmexp.ncolors) == 0);
    403 
    404 	/* We always start with only 1 bucket. */
    405 	uvm.bucketcount = 1;
    406 
    407 	/*
    408 	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
    409 	 * use.   for each page of memory we use we need a vm_page structure.
    410 	 * thus, the total number of pages we can use is the total size of
    411 	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
     412 	 * the memory divided by (PAGE_SIZE plus the size of the vm_page
     413 	 * structure).   we add one to freepages as a fudge factor to avoid
    414 	 * pages).
    415 	 */
    416 	pagecount = ((freepages + 1) << PAGE_SHIFT) /
    417 	    (PAGE_SIZE + sizeof(struct vm_page));
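         	/*
         	 * Illustrative figures only (sizeof(struct vm_page) varies by
         	 * kernel configuration): assuming 4KiB pages and a vm_page of
         	 * roughly 128 bytes, 1GiB of registered RAM (262144 frames)
         	 * gives pagecount ~= (262145 * 4096) / (4096 + 128), i.e. about
         	 * 254000 usable pages; the rest holds the vm_page array itself.
         	 */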
    418 	bucketsize = offsetof(struct pgflbucket, pgb_colors[uvmexp.ncolors]);
    419 	bucketsize = roundup2(bucketsize, coherency_unit);
    420 	bucketarray = (void *)uvm_pageboot_alloc(
    421 	    bucketsize * VM_NFREELIST +
    422 	    pagecount * sizeof(struct vm_page));
    423 	pagearray = (struct vm_page *)
    424 	    (bucketarray + bucketsize * VM_NFREELIST);
    425 
    426 	for (fl = 0; fl < VM_NFREELIST; fl++) {
    427 		pgb = (struct pgflbucket *)(bucketarray + bucketsize * fl);
    428 		uvm_page_init_bucket(&uvm.page_free[fl], pgb, 0);
    429 	}
    430 	memset(pagearray, 0, pagecount * sizeof(struct vm_page));
    431 
    432 	/*
    433 	 * init the freelist cache in the disabled state.
    434 	 */
    435 	uvm_pgflcache_init();
    436 
    437 	/*
    438 	 * init the vm_page structures and put them in the correct place.
    439 	 */
    440 	/* First init the extent */
    441 
    442 	for (bank = uvm_physseg_get_first(),
    443 		 uvm_physseg_seg_chomp_slab(bank, pagearray, pagecount);
    444 	     uvm_physseg_valid_p(bank);
    445 	     bank = uvm_physseg_get_next(bank)) {
    446 
    447 		n = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
    448 		uvm_physseg_seg_alloc_from_slab(bank, n);
    449 		uvm_physseg_init_seg(bank, pagearray);
    450 
    451 		/* set up page array pointers */
    452 		pagearray += n;
    453 		pagecount -= n;
    454 	}
    455 
    456 	/*
    457 	 * pass up the values of virtual_space_start and
    458 	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
    459 	 * layers of the VM.
    460 	 */
    461 
    462 	*kvm_startp = round_page(virtual_space_start);
    463 	*kvm_endp = trunc_page(virtual_space_end);
    464 #ifdef DEBUG
    465 	/*
    466 	 * steal kva for uvm_pagezerocheck().
    467 	 */
    468 	uvm_zerocheckkva = *kvm_startp;
    469 	*kvm_startp += PAGE_SIZE;
    470 	mutex_init(&uvm_zerochecklock, MUTEX_DEFAULT, IPL_VM);
    471 #endif /* DEBUG */
    472 
    473 	/*
    474 	 * init various thresholds.
    475 	 */
    476 
    477 	uvmexp.reserve_pagedaemon = 1;
    478 	uvmexp.reserve_kernel = vm_page_reserve_kernel;
    479 
    480 	/*
    481 	 * done!
    482 	 */
    483 
    484 	uvm.page_init_done = true;
    485 }
    486 
    487 /*
    488  * uvm_pgfl_lock: lock all freelist buckets
    489  */
    490 
    491 void
    492 uvm_pgfl_lock(void)
    493 {
    494 	int i;
    495 
    496 	for (i = 0; i < __arraycount(uvm_freelist_locks); i++) {
    497 		mutex_spin_enter(&uvm_freelist_locks[i].lock);
    498 	}
    499 }
    500 
    501 /*
    502  * uvm_pgfl_unlock: unlock all freelist buckets
    503  */
    504 
    505 void
    506 uvm_pgfl_unlock(void)
    507 {
    508 	int i;
    509 
    510 	for (i = 0; i < __arraycount(uvm_freelist_locks); i++) {
    511 		mutex_spin_exit(&uvm_freelist_locks[i].lock);
    512 	}
    513 }
    514 
    515 /*
    516  * uvm_setpagesize: set the page size
    517  *
    518  * => sets page_shift and page_mask from uvmexp.pagesize.
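          * => e.g. with uvmexp.pagesize 4096, pagemask becomes 0xfff and
          *    pageshift becomes 12.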
    519  */
    520 
    521 void
    522 uvm_setpagesize(void)
    523 {
    524 
    525 	/*
    526 	 * If uvmexp.pagesize is 0 at this point, we expect PAGE_SIZE
    527 	 * to be a constant (indicated by being a non-zero value).
    528 	 */
    529 	if (uvmexp.pagesize == 0) {
    530 		if (PAGE_SIZE == 0)
    531 			panic("uvm_setpagesize: uvmexp.pagesize not set");
    532 		uvmexp.pagesize = PAGE_SIZE;
    533 	}
    534 	uvmexp.pagemask = uvmexp.pagesize - 1;
    535 	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
    536 		panic("uvm_setpagesize: page size %u (%#x) not a power of two",
    537 		    uvmexp.pagesize, uvmexp.pagesize);
    538 	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
    539 		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
    540 			break;
    541 }
    542 
    543 /*
    544  * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
    545  */
    546 
    547 vaddr_t
    548 uvm_pageboot_alloc(vsize_t size)
    549 {
    550 	static bool initialized = false;
    551 	vaddr_t addr;
    552 #if !defined(PMAP_STEAL_MEMORY)
    553 	vaddr_t vaddr;
    554 	paddr_t paddr;
    555 #endif
    556 
    557 	/*
    558 	 * on first call to this function, initialize ourselves.
    559 	 */
    560 	if (initialized == false) {
    561 		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
    562 
    563 		/* round it the way we like it */
    564 		virtual_space_start = round_page(virtual_space_start);
    565 		virtual_space_end = trunc_page(virtual_space_end);
    566 
    567 		initialized = true;
    568 	}
    569 
    570 	/* round to page size */
    571 	size = round_page(size);
    572 	uvmexp.bootpages += atop(size);
    573 
    574 #if defined(PMAP_STEAL_MEMORY)
    575 
    576 	/*
    577 	 * defer bootstrap allocation to MD code (it may want to allocate
    578 	 * from a direct-mapped segment).  pmap_steal_memory should adjust
    579 	 * virtual_space_start/virtual_space_end if necessary.
    580 	 */
    581 
    582 	addr = pmap_steal_memory(size, &virtual_space_start,
    583 	    &virtual_space_end);
    584 
    585 	return(addr);
    586 
    587 #else /* !PMAP_STEAL_MEMORY */
    588 
    589 	/*
    590 	 * allocate virtual memory for this request
    591 	 */
    592 	if (virtual_space_start == virtual_space_end ||
    593 	    (virtual_space_end - virtual_space_start) < size)
    594 		panic("uvm_pageboot_alloc: out of virtual space");
    595 
    596 	addr = virtual_space_start;
    597 
    598 #ifdef PMAP_GROWKERNEL
    599 	/*
    600 	 * If the kernel pmap can't map the requested space,
    601 	 * then allocate more resources for it.
    602 	 */
    603 	if (uvm_maxkaddr < (addr + size)) {
    604 		uvm_maxkaddr = pmap_growkernel(addr + size);
    605 		if (uvm_maxkaddr < (addr + size))
    606 			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
    607 	}
    608 #endif
    609 
    610 	virtual_space_start += size;
    611 
    612 	/*
    613 	 * allocate and mapin physical pages to back new virtual pages
    614 	 */
    615 
    616 	for (vaddr = round_page(addr) ; vaddr < addr + size ;
    617 	    vaddr += PAGE_SIZE) {
    618 
    619 		if (!uvm_page_physget(&paddr))
    620 			panic("uvm_pageboot_alloc: out of memory");
    621 
    622 		/*
    623 		 * Note this memory is no longer managed, so using
    624 		 * pmap_kenter is safe.
    625 		 */
    626 		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE, 0);
    627 	}
    628 	pmap_update(pmap_kernel());
    629 	return(addr);
    630 #endif	/* PMAP_STEAL_MEMORY */
    631 }
    632 
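         /*
          * Usage sketch (illustrative, not part of this file): MD bootstrap
          * code typically steals early wired memory along these lines, with
          * MSGBUF_BYTES standing in for some real size:
          *
          *	vaddr_t va = uvm_pageboot_alloc(MSGBUF_BYTES);
          *	memset((void *)va, 0, MSGBUF_BYTES);
          *
          * The returned KVA is already backed and mapped and is never given
          * back to the page allocator; uvm_pageboot_alloc() itself may only
          * be called during bootstrap.
          */
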
    633 #if !defined(PMAP_STEAL_MEMORY)
    634 /*
    635  * uvm_page_physget: "steal" one page from the vm_physmem structure.
    636  *
    637  * => attempt to allocate it off the end of a segment in which the "avail"
    638  *    values match the start/end values.   if we can't do that, then we
    639  *    will advance both values (making them equal, and removing some
    640  *    vm_page structures from the non-avail area).
    641  * => return false if out of memory.
    642  */
    643 
    644 /* subroutine: try to allocate from memory chunks on the specified freelist */
    645 static bool uvm_page_physget_freelist(paddr_t *, int);
    646 
    647 static bool
    648 uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
    649 {
    650 	uvm_physseg_t lcv;
    651 
    652 	/* pass 1: try allocating from a matching end */
    653 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
    654 	for (lcv = uvm_physseg_get_last(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_prev(lcv))
    655 #else
    656 	for (lcv = uvm_physseg_get_first(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_next(lcv))
    657 #endif
    658 	{
    659 		if (uvm.page_init_done == true)
    660 			panic("uvm_page_physget: called _after_ bootstrap");
    661 
    662 		/* Try to match at front or back on unused segment */
    663 		if (uvm_page_physunload(lcv, freelist, paddrp))
    664 			return true;
    665 	}
    666 
     667 	/* pass 2: forget about matching ends, just allocate something */
    668 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
    669 	for (lcv = uvm_physseg_get_last(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_prev(lcv))
    670 #else
    671 	for (lcv = uvm_physseg_get_first(); uvm_physseg_valid_p(lcv); lcv = uvm_physseg_get_next(lcv))
    672 #endif
    673 	{
    674 		/* Try the front regardless. */
    675 		if (uvm_page_physunload_force(lcv, freelist, paddrp))
    676 			return true;
    677 	}
    678 	return false;
    679 }
    680 
    681 bool
    682 uvm_page_physget(paddr_t *paddrp)
    683 {
    684 	int i;
    685 
    686 	/* try in the order of freelist preference */
    687 	for (i = 0; i < VM_NFREELIST; i++)
    688 		if (uvm_page_physget_freelist(paddrp, i) == true)
    689 			return (true);
    690 	return (false);
    691 }
    692 #endif /* PMAP_STEAL_MEMORY */
    693 
    694 /*
    695  * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
    696  * back from an I/O mapping (ugh!).   used in some MD code as well.
    697  */
    698 struct vm_page *
    699 uvm_phys_to_vm_page(paddr_t pa)
    700 {
    701 	paddr_t pf = atop(pa);
    702 	paddr_t	off;
    703 	uvm_physseg_t	upm;
    704 
    705 	upm = uvm_physseg_find(pf, &off);
    706 	if (upm != UVM_PHYSSEG_TYPE_INVALID)
    707 		return uvm_physseg_get_pg(upm, off);
    708 	return(NULL);
    709 }
    710 
    711 paddr_t
    712 uvm_vm_page_to_phys(const struct vm_page *pg)
    713 {
    714 
    715 	return pg->phys_addr & ~(PAGE_SIZE - 1);
    716 }
    717 
    718 /*
    719  * uvm_page_numa_load: load NUMA range description.
    720  */
    721 void
    722 uvm_page_numa_load(paddr_t start, paddr_t size, u_int numa_id)
    723 {
    724 	struct uvm_page_numa_region *d;
    725 
    726 	KASSERT(numa_id < PGFL_MAX_BUCKETS);
    727 
    728 	d = kmem_alloc(sizeof(*d), KM_SLEEP);
    729 	d->start = start;
    730 	d->size = size;
    731 	d->numa_id = numa_id;
    732 	d->next = uvm_page_numa_region;
    733 	uvm_page_numa_region = d;
    734 }
    735 
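         /*
          * Usage sketch (illustrative): MD or ACPI glue could describe one
          * node of a two-node machine like this, before uvm_page_rebucket()
          * runs:
          *
          *	uvm_page_numa_load(0x100000000UL, 0x100000000UL, 1);
          *
          * i.e. the 4GiB of physical memory starting at 4GiB belongs to
          * NUMA node 1.  The addresses and sizes here are made up.
          */
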
    736 /*
    737  * uvm_page_numa_lookup: lookup NUMA node for the given page.
    738  */
    739 static u_int
    740 uvm_page_numa_lookup(struct vm_page *pg)
    741 {
    742 	struct uvm_page_numa_region *d;
    743 	static bool warned;
    744 	paddr_t pa;
    745 
    746 	KASSERT(uvm.numa_alloc);
    747 	KASSERT(uvm_page_numa_region != NULL);
    748 
    749 	pa = VM_PAGE_TO_PHYS(pg);
    750 	for (d = uvm_page_numa_region; d != NULL; d = d->next) {
    751 		if (pa >= d->start && pa < d->start + d->size) {
    752 			return d->numa_id;
    753 		}
    754 	}
    755 
    756 	if (!warned) {
    757 		printf("uvm_page_numa_lookup: failed, first pg=%p pa=%#"
    758 		    PRIxPADDR "\n", pg, VM_PAGE_TO_PHYS(pg));
    759 		warned = true;
    760 	}
    761 
    762 	return 0;
    763 }
    764 
    765 /*
    766  * uvm_page_redim: adjust freelist dimensions if they have changed.
    767  */
    768 
    769 static void
    770 uvm_page_redim(int newncolors, int newnbuckets)
    771 {
    772 	struct pgfreelist npgfl;
    773 	struct pgflbucket *opgb, *npgb;
    774 	struct pgflist *ohead, *nhead;
    775 	struct vm_page *pg;
    776 	size_t bucketsize, bucketmemsize, oldbucketmemsize;
    777 	int fl, ob, oc, nb, nc, obuckets, ocolors;
    778 	char *bucketarray, *oldbucketmem, *bucketmem;
    779 
    780 	KASSERT(((newncolors - 1) & newncolors) == 0);
    781 
    782 	/* Anything to do? */
    783 	if (newncolors <= uvmexp.ncolors &&
    784 	    newnbuckets == uvm.bucketcount) {
    785 		return;
    786 	}
    787 	if (uvm.page_init_done == false) {
    788 		uvmexp.ncolors = newncolors;
    789 		return;
    790 	}
    791 
    792 	bucketsize = offsetof(struct pgflbucket, pgb_colors[newncolors]);
    793 	bucketsize = roundup2(bucketsize, coherency_unit);
    794 	bucketmemsize = bucketsize * newnbuckets * VM_NFREELIST +
    795 	    coherency_unit - 1;
    796 	bucketmem = kmem_zalloc(bucketmemsize, KM_SLEEP);
    797 	bucketarray = (char *)roundup2((uintptr_t)bucketmem, coherency_unit);
    798 
    799 	ocolors = uvmexp.ncolors;
    800 	obuckets = uvm.bucketcount;
    801 
     802 	/* Freelist cache mustn't be enabled. */
    803 	uvm_pgflcache_pause();
    804 
    805 	/* Make sure we should still do this. */
    806 	uvm_pgfl_lock();
    807 	if (newncolors <= uvmexp.ncolors &&
    808 	    newnbuckets == uvm.bucketcount) {
    809 		uvm_pgfl_unlock();
    810 		uvm_pgflcache_resume();
    811 		kmem_free(bucketmem, bucketmemsize);
    812 		return;
    813 	}
    814 
    815 	uvmexp.ncolors = newncolors;
    816 	uvmexp.colormask = uvmexp.ncolors - 1;
    817 	uvm.bucketcount = newnbuckets;
    818 
    819 	for (fl = 0; fl < VM_NFREELIST; fl++) {
    820 		/* Init new buckets in new freelist. */
    821 		memset(&npgfl, 0, sizeof(npgfl));
    822 		for (nb = 0; nb < newnbuckets; nb++) {
    823 			npgb = (struct pgflbucket *)bucketarray;
    824 			uvm_page_init_bucket(&npgfl, npgb, nb);
    825 			bucketarray += bucketsize;
    826 		}
    827 		/* Now transfer pages from the old freelist. */
    828 		for (nb = ob = 0; ob < obuckets; ob++) {
    829 			opgb = uvm.page_free[fl].pgfl_buckets[ob];
    830 			for (oc = 0; oc < ocolors; oc++) {
    831 				ohead = &opgb->pgb_colors[oc];
    832 				while ((pg = LIST_FIRST(ohead)) != NULL) {
    833 					LIST_REMOVE(pg, pageq.list);
    834 					/*
    835 					 * Here we decide on the NEW color &
    836 					 * bucket for the page.  For NUMA
    837 					 * we'll use the info that the
    838 					 * hardware gave us.  For non-NUMA
     839 					 * we take the physical page frame
    840 					 * number and cache color into
    841 					 * account.  We do this to try and
    842 					 * avoid defeating any memory
    843 					 * interleaving in the hardware.
    844 					 */
    845 					KASSERT(
    846 					    uvm_page_get_bucket(pg) == ob);
    847 					KASSERT(fl ==
    848 					    uvm_page_get_freelist(pg));
    849 					if (uvm.numa_alloc) {
    850 						nb = uvm_page_numa_lookup(pg);
    851 					} else {
    852 						nb = atop(VM_PAGE_TO_PHYS(pg))
    853 						    / uvmexp.ncolors / 8
    854 						    % newnbuckets;
    855 					}
    856 					uvm_page_set_bucket(pg, nb);
    857 					npgb = npgfl.pgfl_buckets[nb];
    858 					npgb->pgb_nfree++;
    859 					nc = VM_PGCOLOR(pg);
    860 					nhead = &npgb->pgb_colors[nc];
    861 					LIST_INSERT_HEAD(nhead, pg, pageq.list);
    862 				}
    863 			}
    864 		}
    865 		/* Install the new freelist. */
    866 		memcpy(&uvm.page_free[fl], &npgfl, sizeof(npgfl));
    867 	}
    868 
    869 	/* Unlock and free the old memory. */
    870 	oldbucketmemsize = recolored_pages_memsize;
    871 	oldbucketmem = recolored_pages_mem;
    872 	recolored_pages_memsize = bucketmemsize;
    873 	recolored_pages_mem = bucketmem;
    874 
    875 	uvm_pgfl_unlock();
    876 	uvm_pgflcache_resume();
    877 
    878 	if (oldbucketmemsize) {
    879 		kmem_free(oldbucketmem, oldbucketmemsize);
    880 	}
    881 
    882 	/*
    883 	 * this calls uvm_km_alloc() which may want to hold
    884 	 * uvm_freelist_lock.
    885 	 */
    886 	uvm_pager_realloc_emerg();
    887 }
    888 
    889 /*
    890  * uvm_page_recolor: Recolor the pages if the new color count is
    891  * larger than the old one.
    892  */
    893 
    894 void
    895 uvm_page_recolor(int newncolors)
    896 {
    897 
    898 	uvm_page_redim(newncolors, uvm.bucketcount);
    899 }
    900 
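         /*
          * Usage sketch (illustrative): MD cache-probing code might request
          * one color per page-sized chunk of a cache way, e.g. for a
          * hypothetical 2MiB 8-way L2 with 4KiB pages:
          *
          *	uvm_page_recolor((2 * 1024 * 1024) / 8 / PAGE_SIZE);
          *
          * which asks for 64 colors; requests smaller than the current
          * color count are ignored.
          */
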
    901 /*
    902  * uvm_page_rebucket: Determine a bucket structure and redim the free
    903  * lists to match.
    904  */
    905 
    906 void
    907 uvm_page_rebucket(void)
    908 {
    909 	u_int min_numa, max_numa, npackage, shift;
    910 	struct cpu_info *ci, *ci2, *ci3;
    911 	CPU_INFO_ITERATOR cii;
    912 
    913 	/*
    914 	 * If we have more than one NUMA node, and the maximum NUMA node ID
    915 	 * is less than PGFL_MAX_BUCKETS, then we'll use NUMA distribution
    916 	 * for free pages.  uvm_pagefree() will not reassign pages to a
    917 	 * different bucket on free.
    918 	 */
    919 	min_numa = (u_int)-1;
    920 	max_numa = 0;
    921 	for (CPU_INFO_FOREACH(cii, ci)) {
    922 		if (ci->ci_numa_id < min_numa) {
    923 			min_numa = ci->ci_numa_id;
    924 		}
    925 		if (ci->ci_numa_id > max_numa) {
    926 			max_numa = ci->ci_numa_id;
    927 		}
    928 	}
    929 	if (min_numa != max_numa && max_numa < PGFL_MAX_BUCKETS) {
    930 #ifdef NUMA
    931 		/*
    932 		 * We can do this, and it seems to work well, but until
    933 		 * further experiments are done we'll stick with the cache
    934 		 * locality strategy.
    935 		 */
    936 		aprint_debug("UVM: using NUMA allocation scheme\n");
    937 		for (CPU_INFO_FOREACH(cii, ci)) {
    938 			ci->ci_data.cpu_uvm->pgflbucket = ci->ci_numa_id;
    939 		}
    940 		uvm.numa_alloc = true;
    941 	 	uvm_page_redim(uvmexp.ncolors, max_numa + 1);
    942 	 	return;
    943 #endif
    944 	}
    945 
    946 	/*
    947 	 * Otherwise we'll go with a scheme to maximise L2/L3 cache locality
    948 	 * and minimise lock contention.  Count the total number of CPU
    949 	 * packages, and then try to distribute the buckets among CPU
    950 	 * packages evenly.  uvm_pagefree() will reassign pages to the
    951 	 * freeing CPU's preferred bucket on free.
    952 	 */
    953 	npackage = curcpu()->ci_nsibling[CPUREL_PACKAGE1ST];
    954 
    955 	/*
    956 	 * Figure out how to arrange the packages & buckets, and the total
    957 	 * number of buckets we need.  XXX 2 may not be the best factor.
    958 	 */
    959 	for (shift = 0; npackage > PGFL_MAX_BUCKETS; shift++) {
    960 		npackage >>= 1;
    961 	}
    962  	uvm_page_redim(uvmexp.ncolors, npackage);
    963 
    964  	/*
    965  	 * Now tell each CPU which bucket to use.  In the outer loop, scroll
    966  	 * through all CPU packages.
    967  	 */
    968  	npackage = 0;
    969 	ci = curcpu();
    970 	ci2 = ci->ci_sibling[CPUREL_PACKAGE1ST];
    971 	do {
    972 		/*
    973 		 * In the inner loop, scroll through all CPUs in the package
    974 		 * and assign the same bucket ID.
    975 		 */
    976 		ci3 = ci2;
    977 		do {
    978 			ci3->ci_data.cpu_uvm->pgflbucket = npackage >> shift;
    979 			ci3 = ci3->ci_sibling[CPUREL_PACKAGE];
    980 		} while (ci3 != ci2);
    981 		npackage++;
    982 		ci2 = ci2->ci_sibling[CPUREL_PACKAGE1ST];
    983 	} while (ci2 != ci->ci_sibling[CPUREL_PACKAGE1ST]);
    984 
    985 	aprint_debug("UVM: using package allocation scheme, "
    986 	    "%d package(s) per bucket\n", 1 << shift);
    987 }
    988 
    989 /*
    990  * uvm_cpu_attach: initialize per-CPU data structures.
    991  */
    992 
    993 void
    994 uvm_cpu_attach(struct cpu_info *ci)
    995 {
    996 	struct uvm_cpu *ucpu;
    997 
    998 	/* Already done in uvm_page_init(). */
    999 	if (!CPU_IS_PRIMARY(ci)) {
   1000 		/* Add more reserve pages for this CPU. */
   1001 		uvmexp.reserve_kernel += vm_page_reserve_kernel;
   1002 
   1003 		/* Allocate per-CPU data structures. */
   1004 		ucpu = kmem_zalloc(sizeof(struct uvm_cpu) + coherency_unit - 1,
   1005 		    KM_SLEEP);
   1006 		ucpu = (struct uvm_cpu *)roundup2((uintptr_t)ucpu,
   1007 		    coherency_unit);
   1008 		ci->ci_data.cpu_uvm = ucpu;
   1009 	} else {
   1010 		ucpu = ci->ci_data.cpu_uvm;
   1011 	}
   1012 
   1013 	uvmpdpol_init_cpu(ucpu);
   1014 
   1015 	/*
   1016 	 * Attach RNG source for this CPU's VM events
   1017 	 */
   1018         rnd_attach_source(&ucpu->rs, ci->ci_data.cpu_name, RND_TYPE_VM,
   1019 	    RND_FLAG_COLLECT_TIME|RND_FLAG_COLLECT_VALUE|
   1020 	    RND_FLAG_ESTIMATE_VALUE);
   1021 }
   1022 
   1023 /*
   1024  * uvm_availmem: fetch the total amount of free memory in pages.  this can
   1025  * have a detrimental effect on performance due to false sharing; don't call
   1026  * unless needed.
   1027  */
   1028 
   1029 int
   1030 uvm_availmem(void)
   1031 {
   1032 	struct pgfreelist *pgfl;
   1033 	int fl, b, fpages;
   1034 
   1035 	fpages = 0;
   1036 	for (fl = 0; fl < VM_NFREELIST; fl++) {
   1037 		pgfl = &uvm.page_free[fl];
   1038 		for (b = 0; b < uvm.bucketcount; b++) {
   1039 			fpages += pgfl->pgfl_buckets[b]->pgb_nfree;
   1040 		}
   1041 	}
   1042 	return fpages;
   1043 }
   1044 
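         /*
          * Usage sketch (illustrative): a caller that wants to react to low
          * memory might check the free page count like this:
          *
          *	if (uvm_availmem() < uvmexp.freetarg) {
          *		uvm_kick_pdaemon();
          *	}
          *
          * As noted above, the walk over all buckets bounces cache lines,
          * so don't do this on a hot path.
          */
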
   1045 /*
   1046  * uvm_pagealloc_pgb: helper routine that tries to allocate any color from a
   1047  * specific freelist and specific bucket only.
   1048  *
   1049  * => must be at IPL_VM or higher to protect per-CPU data structures.
   1050  */
   1051 
   1052 static struct vm_page *
   1053 uvm_pagealloc_pgb(struct uvm_cpu *ucpu, int f, int b, int *trycolorp, int flags)
   1054 {
   1055 	int c, trycolor, colormask;
   1056 	struct pgflbucket *pgb;
   1057 	struct vm_page *pg;
   1058 	kmutex_t *lock;
   1059 	bool fill;
   1060 
   1061 	/*
   1062 	 * Skip the bucket if empty, no lock needed.  There could be many
   1063 	 * empty freelists/buckets.
   1064 	 */
   1065 	pgb = uvm.page_free[f].pgfl_buckets[b];
   1066 	if (pgb->pgb_nfree == 0) {
   1067 		return NULL;
   1068 	}
   1069 
   1070 	/* Skip bucket if low on memory. */
   1071 	lock = &uvm_freelist_locks[b].lock;
   1072 	mutex_spin_enter(lock);
   1073 	if (__predict_false(pgb->pgb_nfree <= uvmexp.reserve_kernel)) {
   1074 		if ((flags & UVM_PGA_USERESERVE) == 0 ||
   1075 		    (pgb->pgb_nfree <= uvmexp.reserve_pagedaemon &&
   1076 		     curlwp != uvm.pagedaemon_lwp)) {
   1077 			mutex_spin_exit(lock);
   1078 		     	return NULL;
   1079 		}
   1080 		fill = false;
   1081 	} else {
   1082 		fill = true;
   1083 	}
   1084 
   1085 	/* Try all page colors as needed. */
   1086 	c = trycolor = *trycolorp;
   1087 	colormask = uvmexp.colormask;
   1088 	do {
   1089 		pg = LIST_FIRST(&pgb->pgb_colors[c]);
   1090 		if (__predict_true(pg != NULL)) {
   1091 			/*
   1092 			 * Got a free page!  PG_FREE must be cleared under
   1093 			 * lock because of uvm_pglistalloc().
   1094 			 */
   1095 			LIST_REMOVE(pg, pageq.list);
   1096 			KASSERT(pg->flags & PG_FREE);
   1097 			pg->flags &= PG_ZERO;
   1098 			pgb->pgb_nfree--;
   1099 
   1100 			/*
   1101 			 * While we have the bucket locked and our data
   1102 			 * structures fresh in L1 cache, we have an ideal
   1103 			 * opportunity to grab some pages for the freelist
   1104 			 * cache without causing extra contention.  Only do
   1105 			 * so if we found pages in this CPU's preferred
   1106 			 * bucket.
   1107 			 */
   1108 			if (__predict_true(b == ucpu->pgflbucket && fill)) {
   1109 				uvm_pgflcache_fill(ucpu, f, b, c);
   1110 			}
   1111 			mutex_spin_exit(lock);
   1112 			KASSERT(uvm_page_get_bucket(pg) == b);
   1113 			CPU_COUNT(c == trycolor ?
   1114 			    CPU_COUNT_COLORHIT : CPU_COUNT_COLORMISS, 1);
   1115 			CPU_COUNT(CPU_COUNT_CPUMISS, 1);
   1116 			*trycolorp = c;
   1117 			return pg;
   1118 		}
   1119 		c = (c + 1) & colormask;
   1120 	} while (c != trycolor);
   1121 	mutex_spin_exit(lock);
   1122 
   1123 	return NULL;
   1124 }
   1125 
   1126 /*
   1127  * uvm_pagealloc_pgfl: helper routine for uvm_pagealloc_strat that allocates
   1128  * any color from any bucket, in a specific freelist.
   1129  *
   1130  * => must be at IPL_VM or higher to protect per-CPU data structures.
   1131  */
   1132 
   1133 static struct vm_page *
   1134 uvm_pagealloc_pgfl(struct uvm_cpu *ucpu, int f, int *trycolorp, int flags)
   1135 {
   1136 	int b, trybucket, bucketcount;
   1137 	struct vm_page *pg;
   1138 
   1139 	/* Try for the exact thing in the per-CPU cache. */
   1140 	if ((pg = uvm_pgflcache_alloc(ucpu, f, *trycolorp)) != NULL) {
   1141 		CPU_COUNT(CPU_COUNT_CPUHIT, 1);
   1142 		CPU_COUNT(CPU_COUNT_COLORHIT, 1);
   1143 		return pg;
   1144 	}
   1145 
   1146 	/* Walk through all buckets, trying our preferred bucket first. */
   1147 	trybucket = ucpu->pgflbucket;
   1148 	b = trybucket;
   1149 	bucketcount = uvm.bucketcount;
   1150 	do {
   1151 		pg = uvm_pagealloc_pgb(ucpu, f, b, trycolorp, flags);
   1152 		if (pg != NULL) {
   1153 			return pg;
   1154 		}
   1155 		b = (b + 1 == bucketcount ? 0 : b + 1);
   1156 	} while (b != trybucket);
   1157 
   1158 	return NULL;
   1159 }
   1160 
   1161 /*
   1162  * uvm_pagealloc_strat: allocate vm_page from a particular free list.
   1163  *
   1164  * => return null if no pages free
   1165  * => wake up pagedaemon if number of free pages drops below low water mark
   1166  * => if obj != NULL, obj must be locked (to put in obj's tree)
   1167  * => if anon != NULL, anon must be locked (to put in anon)
   1168  * => only one of obj or anon can be non-null
   1169  * => caller must activate/deactivate page if it is not wired.
   1170  * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
   1171  * => policy decision: it is more important to pull a page off of the
   1172  *	appropriate priority free list than it is to get a zero'd or
   1173  *	unknown contents page.  This is because we live with the
   1174  *	consequences of a bad free list decision for the entire
   1175  *	lifetime of the page, e.g. if the page comes from memory that
   1176  *	is slower to access.
   1177  */
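
         /*
          * Usage sketch (illustrative): most callers go through the
          * uvm_pagealloc() macro, which is this function with
          * UVM_PGA_STRAT_NORMAL, e.g. to allocate a zeroed page for a
          * locked object:
          *
          *	pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO);
          *	if (pg == NULL) {
          *		mutex_exit(uobj->vmobjlock);
          *		uvm_wait("somewhere");
          *		mutex_enter(uobj->vmobjlock);
          *		... retry ...
          *	}
          */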
   1178 
   1179 struct vm_page *
   1180 uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
   1181     int flags, int strat, int free_list)
   1182 {
   1183 	int zeroit = 0, color;
   1184 	int lcv, error, s;
   1185 	struct uvm_cpu *ucpu;
   1186 	struct vm_page *pg;
   1187 	lwp_t *l;
   1188 
   1189 	KASSERT(obj == NULL || anon == NULL);
   1190 	KASSERT(anon == NULL || (flags & UVM_FLAG_COLORMATCH) || off == 0);
   1191 	KASSERT(off == trunc_page(off));
   1192 	KASSERT(obj == NULL || mutex_owned(obj->vmobjlock));
   1193 	KASSERT(anon == NULL || anon->an_lock == NULL ||
   1194 	    mutex_owned(anon->an_lock));
   1195 
   1196 	/*
   1197 	 * This implements a global round-robin page coloring
   1198 	 * algorithm.
   1199 	 */
   1200 
   1201 	s = splvm();
   1202 	ucpu = curcpu()->ci_data.cpu_uvm;
   1203 	if (flags & UVM_FLAG_COLORMATCH) {
   1204 		color = atop(off) & uvmexp.colormask;
   1205 	} else {
   1206 		color = ucpu->pgflcolor;
   1207 	}
   1208 
   1209 	/*
   1210 	 * fail if any of these conditions is true:
   1211 	 * [1]  there really are no free pages, or
   1212 	 * [2]  only kernel "reserved" pages remain and
   1213 	 *        reserved pages have not been requested.
   1214 	 * [3]  only pagedaemon "reserved" pages remain and
   1215 	 *        the requestor isn't the pagedaemon.
   1216 	 * we make kernel reserve pages available if called by a
   1217 	 * kernel thread or a realtime thread.
   1218 	 */
   1219 	l = curlwp;
   1220 	if (__predict_true(l != NULL) && lwp_eprio(l) >= PRI_KTHREAD) {
   1221 		flags |= UVM_PGA_USERESERVE;
   1222 	}
   1223 
   1224 	/* If the allocator's running in NUMA mode, go with NUMA strategy. */
   1225 	if (uvm.numa_alloc && strat == UVM_PGA_STRAT_NORMAL) {
   1226 		strat = UVM_PGA_STRAT_NUMA;
   1227 	}
   1228 
   1229  again:
   1230 	switch (strat) {
   1231 	case UVM_PGA_STRAT_NORMAL:
   1232 		/* Check freelists: descending priority (ascending id) order. */
   1233 		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
   1234 			pg = uvm_pagealloc_pgfl(ucpu, lcv, &color, flags);
   1235 			if (pg != NULL) {
   1236 				goto gotit;
   1237 			}
   1238 		}
   1239 
   1240 		/* No pages free!  Have pagedaemon free some memory. */
   1241 		splx(s);
   1242 		uvm_kick_pdaemon();
   1243 		return NULL;
   1244 
   1245 	case UVM_PGA_STRAT_ONLY:
   1246 	case UVM_PGA_STRAT_FALLBACK:
   1247 		/* Attempt to allocate from the specified free list. */
   1248 		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
   1249 		pg = uvm_pagealloc_pgfl(ucpu, free_list, &color, flags);
   1250 		if (pg != NULL) {
   1251 			goto gotit;
   1252 		}
   1253 
   1254 		/* Fall back, if possible. */
   1255 		if (strat == UVM_PGA_STRAT_FALLBACK) {
   1256 			strat = UVM_PGA_STRAT_NORMAL;
   1257 			goto again;
   1258 		}
   1259 
   1260 		/* No pages free!  Have pagedaemon free some memory. */
   1261 		splx(s);
   1262 		uvm_kick_pdaemon();
   1263 		return NULL;
   1264 
   1265 	case UVM_PGA_STRAT_NUMA:
   1266 		/*
   1267 		 * NUMA strategy: allocating from the correct bucket is more
   1268 		 * important than observing freelist priority.  Look only to
   1269 		 * the current NUMA node; if that fails, we need to look to
   1270 		 * other NUMA nodes, so retry with the normal strategy.
   1271 		 */
   1272 		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
   1273 			pg = uvm_pgflcache_alloc(ucpu, lcv, color);
   1274 			if (pg != NULL) {
   1275 				CPU_COUNT(CPU_COUNT_CPUHIT, 1);
   1276 				CPU_COUNT(CPU_COUNT_COLORHIT, 1);
   1277 				goto gotit;
   1278 			}
   1279 			pg = uvm_pagealloc_pgb(ucpu, lcv,
   1280 			    ucpu->pgflbucket, &color, flags);
   1281 			if (pg != NULL) {
   1282 				goto gotit;
   1283 			}
   1284 		}
   1285 		strat = UVM_PGA_STRAT_NORMAL;
   1286 		goto again;
   1287 
   1288 	default:
   1289 		panic("uvm_pagealloc_strat: bad strat %d", strat);
   1290 		/* NOTREACHED */
   1291 	}
   1292 
   1293  gotit:
   1294 	/*
   1295 	 * We now know which color we actually allocated from; set
   1296 	 * the next color accordingly.
   1297 	 */
   1298 
   1299 	ucpu->pgflcolor = (color + 1) & uvmexp.colormask;
   1300 
   1301 	/*
   1302 	 * while still at IPL_VM, update allocation statistics and remember
   1303 	 * if we have to zero the page
   1304 	 */
   1305 
   1306 	if (flags & UVM_PGA_ZERO) {
   1307 		if (pg->flags & PG_ZERO) {
   1308 		    	CPU_COUNT(CPU_COUNT_PGA_ZEROHIT, 1);
   1309 			zeroit = 0;
   1310 		} else {
   1311 		    	CPU_COUNT(CPU_COUNT_PGA_ZEROMISS, 1);
   1312 			zeroit = 1;
   1313 		}
   1314 	}
   1315 	if (pg->flags & PG_ZERO) {
   1316 	    	CPU_COUNT(CPU_COUNT_ZEROPAGES, -1);
   1317 	}
   1318 	if (anon) {
   1319 		CPU_COUNT(CPU_COUNT_ANONPAGES, 1);
   1320 		CPU_COUNT(CPU_COUNT_ANONCLEAN, 1);
   1321 	}
   1322 	splx(s);
   1323 	KASSERT((pg->flags & ~(PG_ZERO|PG_FREE)) == 0);
   1324 
   1325 	/*
   1326 	 * assign the page to the object.  as the page was free, we know
   1327 	 * that pg->uobject and pg->uanon are NULL.  we only need to take
   1328 	 * the page's interlock if we are changing the values.
   1329 	 */
   1330 	if (anon != NULL || obj != NULL) {
   1331 		mutex_enter(&pg->interlock);
   1332 	}
   1333 	pg->offset = off;
   1334 	pg->uobject = obj;
   1335 	pg->uanon = anon;
   1336 	KASSERT(uvm_page_owner_locked_p(pg));
   1337 	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
   1338 	if (anon) {
   1339 		anon->an_page = pg;
   1340 		pg->flags |= PG_ANON;
   1341 		mutex_exit(&pg->interlock);
   1342 	} else if (obj) {
   1343 		/*
   1344 		 * set PG_FILE|PG_AOBJ before the first uvm_pageinsert.
   1345 		 */
   1346 		if (UVM_OBJ_IS_VNODE(obj)) {
   1347 			pg->flags |= PG_FILE;
   1348 		} else {
   1349 			pg->flags |= PG_AOBJ;
   1350 		}
   1351 		uvm_pageinsert_object(obj, pg);
   1352 		mutex_exit(&pg->interlock);
   1353 		error = uvm_pageinsert_tree(obj, pg);
   1354 		if (error != 0) {
   1355 			mutex_enter(&pg->interlock);
   1356 			uvm_pageremove_object(obj, pg);
   1357 			mutex_exit(&pg->interlock);
   1358 			uvm_pagefree(pg);
   1359 			return NULL;
   1360 		}
   1361 	}
   1362 
   1363 #if defined(UVM_PAGE_TRKOWN)
   1364 	pg->owner_tag = NULL;
   1365 #endif
   1366 	UVM_PAGE_OWN(pg, "new alloc");
   1367 
   1368 	if (flags & UVM_PGA_ZERO) {
   1369 		/*
   1370 		 * A zero'd page is not clean.  If we got a page not already
   1371 		 * zero'd, then we have to zero it ourselves.
   1372 		 */
   1373 		if (obj != NULL || anon != NULL) {
   1374 			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
   1375 		}
   1376 		if (zeroit) {
   1377 			pmap_zero_page(VM_PAGE_TO_PHYS(pg));
   1378 		}
   1379 	}
   1380 
   1381 	return(pg);
   1382 }
   1383 
   1384 /*
   1385  * uvm_pagereplace: replace a page with another
   1386  *
   1387  * => object must be locked
   1388  * => page interlocks must be held
   1389  */
   1390 
   1391 void
   1392 uvm_pagereplace(struct vm_page *oldpg, struct vm_page *newpg)
   1393 {
   1394 	struct uvm_object *uobj = oldpg->uobject;
   1395 	struct vm_page *pg __diagused;
   1396 	uint64_t idx;
   1397 
   1398 	KASSERT((oldpg->flags & PG_TABLED) != 0);
   1399 	KASSERT(uobj != NULL);
   1400 	KASSERT((newpg->flags & PG_TABLED) == 0);
   1401 	KASSERT(newpg->uobject == NULL);
   1402 	KASSERT(mutex_owned(uobj->vmobjlock));
   1403 	KASSERT(mutex_owned(&oldpg->interlock));
   1404 	KASSERT(mutex_owned(&newpg->interlock));
   1405 
   1406 	newpg->uobject = uobj;
   1407 	newpg->offset = oldpg->offset;
   1408 	idx = newpg->offset >> PAGE_SHIFT;
   1409 	pg = radix_tree_replace_node(&uobj->uo_pages, idx, newpg);
   1410 	KASSERT(pg == oldpg);
   1411 	if (((oldpg->flags ^ newpg->flags) & PG_CLEAN) != 0) {
   1412 		if ((newpg->flags & PG_CLEAN) != 0) {
   1413 			radix_tree_clear_tag(&uobj->uo_pages, idx,
   1414 			    UVM_PAGE_DIRTY_TAG);
   1415 		} else {
   1416 			radix_tree_set_tag(&uobj->uo_pages, idx,
   1417 			    UVM_PAGE_DIRTY_TAG);
   1418 		}
   1419 	}
   1420 	/*
   1421 	 * oldpg's PG_STAT is stable.  newpg is not reachable by others yet.
   1422 	 */
   1423 	newpg->flags |=
   1424 	    (newpg->flags & ~PG_STAT) | (oldpg->flags & PG_STAT);
   1425 	uvm_pageinsert_object(uobj, newpg);
   1426 	uvm_pageremove_object(uobj, oldpg);
   1427 }
   1428 
   1429 /*
   1430  * uvm_pagerealloc: reallocate a page from one object to another
   1431  *
   1432  * => both objects must be locked
   1433  * => both interlocks must be held
   1434  */
   1435 
   1436 void
   1437 uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
   1438 {
   1439 	/*
   1440 	 * remove it from the old object
   1441 	 */
   1442 
   1443 	if (pg->uobject) {
   1444 		uvm_pageremove_tree(pg->uobject, pg);
   1445 		uvm_pageremove_object(pg->uobject, pg);
   1446 	}
   1447 
   1448 	/*
   1449 	 * put it in the new object
   1450 	 */
   1451 
   1452 	if (newobj) {
   1453 		/*
   1454 		 * XXX we have no in-tree users of this functionality
   1455 		 */
   1456 		panic("uvm_pagerealloc: no impl");
   1457 	}
   1458 }
   1459 
   1460 #ifdef DEBUG
   1461 /*
   1462  * check if page is zero-filled
   1463  */
   1464 void
   1465 uvm_pagezerocheck(struct vm_page *pg)
   1466 {
   1467 	int *p, *ep;
   1468 
   1469 	KASSERT(uvm_zerocheckkva != 0);
   1470 
   1471 	/*
   1472 	 * XXX assuming pmap_kenter_pa and pmap_kremove never call
   1473 	 * uvm page allocator.
   1474 	 *
   1475 	 * it might be better to have "CPU-local temporary map" pmap interface.
   1476 	 */
   1477 	mutex_spin_enter(&uvm_zerochecklock);
   1478 	pmap_kenter_pa(uvm_zerocheckkva, VM_PAGE_TO_PHYS(pg), VM_PROT_READ, 0);
   1479 	p = (int *)uvm_zerocheckkva;
   1480 	ep = (int *)((char *)p + PAGE_SIZE);
   1481 	pmap_update(pmap_kernel());
   1482 	while (p < ep) {
   1483 		if (*p != 0)
   1484 			panic("PG_ZERO page isn't zero-filled");
   1485 		p++;
   1486 	}
   1487 	pmap_kremove(uvm_zerocheckkva, PAGE_SIZE);
   1488 	mutex_spin_exit(&uvm_zerochecklock);
   1489 	/*
   1490 	 * pmap_update() is not necessary here because no one except us
   1491 	 * uses this VA.
   1492 	 */
   1493 }
   1494 #endif /* DEBUG */
   1495 
   1496 /*
   1497  * uvm_pagefree: free page
   1498  *
   1499  * => erase page's identity (i.e. remove from object)
   1500  * => put page on free list
   1501  * => caller must lock owning object (either anon or uvm_object)
   1502  * => assumes all valid mappings of pg are gone
   1503  */
   1504 
   1505 void
   1506 uvm_pagefree(struct vm_page *pg)
   1507 {
   1508 	struct pgfreelist *pgfl;
   1509 	struct pgflbucket *pgb;
   1510 	struct uvm_cpu *ucpu;
   1511 	kmutex_t *lock;
   1512 	int bucket, s;
   1513 	bool locked;
   1514 
   1515 #ifdef DEBUG
   1516 	if (pg->uobject == (void *)0xdeadbeef &&
   1517 	    pg->uanon == (void *)0xdeadbeef) {
   1518 		panic("uvm_pagefree: freeing free page %p", pg);
   1519 	}
   1520 #endif /* DEBUG */
   1521 
   1522 	KASSERT((pg->flags & PG_PAGEOUT) == 0);
   1523 	KASSERT(!(pg->flags & PG_FREE));
   1524 	KASSERT(pg->uobject == NULL || mutex_owned(pg->uobject->vmobjlock));
   1525 	KASSERT(pg->uobject != NULL || pg->uanon == NULL ||
   1526 		mutex_owned(pg->uanon->an_lock));
   1527 
   1528 	/*
    1529 	 * remove the page from the object's tree before acquiring any page
   1530 	 * interlocks: this can acquire locks to free radixtree nodes.
   1531 	 */
   1532 	if (pg->uobject != NULL) {
   1533 		uvm_pageremove_tree(pg->uobject, pg);
   1534 	}
   1535 
   1536 	/*
   1537 	 * if the page is loaned, resolve the loan instead of freeing.
   1538 	 */
   1539 
   1540 	if (pg->loan_count) {
   1541 		KASSERT(pg->wire_count == 0);
   1542 
   1543 		/*
   1544 		 * if the page is owned by an anon then we just want to
   1545 		 * drop anon ownership.  the kernel will free the page when
   1546 		 * it is done with it.  if the page is owned by an object,
   1547 		 * remove it from the object and mark it dirty for the benefit
   1548 		 * of possible anon owners.
   1549 		 *
   1550 		 * regardless of previous ownership, wakeup any waiters,
   1551 		 * unbusy the page, and we're done.
   1552 		 */
   1553 
   1554 		uvm_pagelock(pg);
   1555 		locked = true;
   1556 		if (pg->uobject != NULL) {
   1557 			uvm_pageremove_object(pg->uobject, pg);
   1558 			pg->flags &= ~(PG_FILE|PG_AOBJ);
   1559 		} else if (pg->uanon != NULL) {
   1560 			if ((pg->flags & PG_ANON) == 0) {
   1561 				pg->loan_count--;
   1562 			} else {
   1563 				pg->flags &= ~PG_ANON;
   1564 				cpu_count(CPU_COUNT_ANONPAGES, -1);
   1565 			}
   1566 			pg->uanon->an_page = NULL;
   1567 			pg->uanon = NULL;
   1568 		}
   1569 		if (pg->flags & PG_WANTED) {
   1570 			wakeup(pg);
   1571 		}
   1572 		pg->flags &= ~(PG_WANTED|PG_BUSY|PG_RELEASED|PG_PAGER1);
   1573 #ifdef UVM_PAGE_TRKOWN
   1574 		pg->owner_tag = NULL;
   1575 #endif
   1576 		KASSERT((pg->flags & PG_STAT) == 0);
   1577 		if (pg->loan_count) {
   1578 			KASSERT(pg->uobject == NULL);
   1579 			if (pg->uanon == NULL) {
   1580 				uvm_pagedequeue(pg);
   1581 			}
   1582 			uvm_pageunlock(pg);
   1583 			return;
   1584 		}
   1585 	} else if (pg->uobject != NULL || pg->uanon != NULL ||
   1586 	           pg->wire_count != 0) {
   1587 		uvm_pagelock(pg);
   1588 		locked = true;
   1589 	} else {
   1590 		locked = false;
   1591 	}
   1592 
   1593 	/*
   1594 	 * remove page from its object or anon.
   1595 	 */
   1596 	if (pg->uobject != NULL) {
   1597 		uvm_pageremove_object(pg->uobject, pg);
   1598 	} else if (pg->uanon != NULL) {
   1599 		const unsigned int status = uvm_pagegetdirty(pg);
   1600 		pg->uanon->an_page = NULL;
   1601 		pg->uanon = NULL;
   1602 		kpreempt_disable();
   1603 		CPU_COUNT(CPU_COUNT_ANONPAGES, -1);
   1604 		CPU_COUNT(CPU_COUNT_ANONUNKNOWN + status, -1);
   1605 		kpreempt_enable();
   1606 	}
   1607 
   1608 	/*
   1609 	 * if the page was wired, unwire it now.
   1610 	 */
   1611 
   1612 	if (pg->wire_count) {
   1613 		pg->wire_count = 0;
   1614 		atomic_dec_uint(&uvmexp.wired);
   1615 	}
   1616 	if (locked) {
   1617 		/*
   1618 		 * now remove the page from the queues.
   1619 		 */
   1620 		uvm_pagedequeue(pg);
   1621 		uvm_pageunlock(pg);
   1622 	} else {
   1623 		KASSERT(!uvmpdpol_pageisqueued_p(pg));
   1624 	}
   1625 
   1626 	/*
   1627 	 * and put on free queue
   1628 	 */
   1629 
   1630 #ifdef DEBUG
   1631 	pg->uobject = (void *)0xdeadbeef;
   1632 	pg->uanon = (void *)0xdeadbeef;
   1633 	if (pg->flags & PG_ZERO)
   1634 		uvm_pagezerocheck(pg);
   1635 #endif /* DEBUG */
   1636 
   1637 	/* Try to send the page to the per-CPU cache. */
   1638 	s = splvm();
   1639 	if (pg->flags & PG_ZERO) {
   1640 	    	CPU_COUNT(CPU_COUNT_ZEROPAGES, 1);
   1641 	}
   1642 	ucpu = curcpu()->ci_data.cpu_uvm;
   1643 	bucket = uvm_page_get_bucket(pg);
   1644 	if (bucket == ucpu->pgflbucket && uvm_pgflcache_free(ucpu, pg)) {
   1645 		splx(s);
   1646 		return;
   1647 	}
   1648 
   1649 	/* Didn't work.  Never mind, send it to a global bucket. */
   1650 	pgfl = &uvm.page_free[uvm_page_get_freelist(pg)];
   1651 	pgb = pgfl->pgfl_buckets[bucket];
   1652 	lock = &uvm_freelist_locks[bucket].lock;
   1653 
   1654 	mutex_spin_enter(lock);
   1655 	/* PG_FREE must be set under lock because of uvm_pglistalloc(). */
   1656 	pg->flags = (pg->flags & PG_ZERO) | PG_FREE;
   1657 	LIST_INSERT_HEAD(&pgb->pgb_colors[VM_PGCOLOR(pg)], pg, pageq.list);
   1658 	pgb->pgb_nfree++;
   1659 	mutex_spin_exit(lock);
   1660 	splx(s);
   1661 }
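
/*
 * Example (a minimal sketch, not part of this file): the usual pairing of
 * uvm_pagealloc() and uvm_pagefree() under the object lock.  The helper
 * and its error handling are hypothetical.
 */
#if 0
static int
example_alloc_then_free(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	mutex_enter(uobj->vmobjlock);
	pg = uvm_pagealloc(uobj, off, NULL, 0);
	if (pg == NULL) {
		mutex_exit(uobj->vmobjlock);
		return ENOMEM;		/* a real caller could wait and retry */
	}
	/* ... use the page; it is returned PG_BUSY ... */
	pg->flags &= ~PG_BUSY;		/* fresh page: no PG_WANTED waiters */
	UVM_PAGE_OWN(pg, NULL);
	uvm_pagefree(pg);		/* unlinks from uobj, back to free pool */
	mutex_exit(uobj->vmobjlock);
	return 0;
}
#endif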
   1662 
   1663 /*
   1664  * uvm_page_unbusy: unbusy an array of pages.
   1665  *
   1666  * => pages must either all belong to the same object, or all belong to anons.
   1667  * => if pages are object-owned, object must be locked.
   1668  * => if pages are anon-owned, anons must be locked.
   1669  * => caller must make sure that anon-owned pages are not PG_RELEASED.
   1670  */
   1671 
   1672 void
   1673 uvm_page_unbusy(struct vm_page **pgs, int npgs)
   1674 {
   1675 	struct vm_page *pg;
   1676 	int i;
   1677 	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);
   1678 
   1679 	for (i = 0; i < npgs; i++) {
   1680 		pg = pgs[i];
   1681 		if (pg == NULL || pg == PGO_DONTCARE) {
   1682 			continue;
   1683 		}
   1684 
   1685 		KASSERT(uvm_page_owner_locked_p(pg));
   1686 		KASSERT(pg->flags & PG_BUSY);
   1687 		KASSERT((pg->flags & PG_PAGEOUT) == 0);
   1688 		if (pg->flags & PG_WANTED) {
   1689 			/* XXXAD thundering herd problem. */
   1690 			wakeup(pg);
   1691 		}
   1692 		if (pg->flags & PG_RELEASED) {
   1693 			UVMHIST_LOG(ubchist, "releasing pg %#jx",
   1694 			    (uintptr_t)pg, 0, 0, 0);
   1695 			KASSERT(pg->uobject != NULL ||
   1696 			    (pg->uanon != NULL && pg->uanon->an_ref > 0));
   1697 			pg->flags &= ~PG_RELEASED;
   1698 			uvm_pagefree(pg);
   1699 		} else {
   1700 			UVMHIST_LOG(ubchist, "unbusying pg %#jx",
   1701 			    (uintptr_t)pg, 0, 0, 0);
   1702 			KASSERT((pg->flags & PG_FAKE) == 0);
   1703 			pg->flags &= ~(PG_WANTED|PG_BUSY);
   1704 			UVM_PAGE_OWN(pg, NULL);
   1705 		}
   1706 	}
   1707 }
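
/*
 * Example (a minimal sketch with hypothetical names): finishing an
 * asynchronous read by unbusying the whole page array in one call,
 * per the locking contract above.
 */
#if 0
static void
example_read_done(struct uvm_object *uobj, struct vm_page **pgs, int npgs)
{

	mutex_enter(uobj->vmobjlock);
	/* wakes PG_WANTED waiters; frees any PG_RELEASED pages */
	uvm_page_unbusy(pgs, npgs);
	mutex_exit(uobj->vmobjlock);
}
#endif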
   1708 
   1709 #if defined(UVM_PAGE_TRKOWN)
   1710 /*
   1711  * uvm_page_own: set or release page ownership
   1712  *
   1713  * => this is a debugging function that keeps track of who sets PG_BUSY
   1714  *	and where they do it.   it can be used to track down problems
   1715  *	such as a process setting "PG_BUSY" and never releasing it.
   1716  * => page's object [if any] must be locked
   1717  * => if "tag" is NULL then we are releasing page ownership
   1718  */
   1719 void
   1720 uvm_page_own(struct vm_page *pg, const char *tag)
   1721 {
   1722 
   1723 	KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
   1724 	KASSERT((pg->flags & PG_WANTED) == 0);
   1725 	KASSERT(uvm_page_owner_locked_p(pg));
   1726 
   1727 	/* gain ownership? */
   1728 	if (tag) {
   1729 		KASSERT((pg->flags & PG_BUSY) != 0);
   1730 		if (pg->owner_tag) {
   1731 			printf("uvm_page_own: page %p already owned "
   1732 			    "by proc %d [%s]\n", pg,
   1733 			    pg->owner, pg->owner_tag);
   1734 			panic("uvm_page_own");
   1735 		}
   1736 		pg->owner = curproc->p_pid;
   1737 		pg->lowner = curlwp->l_lid;
   1738 		pg->owner_tag = tag;
   1739 		return;
   1740 	}
   1741 
   1742 	/* drop ownership */
   1743 	KASSERT((pg->flags & PG_BUSY) == 0);
   1744 	if (pg->owner_tag == NULL) {
   1745 		printf("uvm_page_own: dropping ownership of a non-owned "
   1746 		    "page (%p)\n", pg);
   1747 		panic("uvm_page_own");
   1748 	}
   1749 	pg->owner_tag = NULL;
   1750 }
   1751 #endif
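
/*
 * Example (sketch only): bracketing a PG_BUSY window with UVM_PAGE_OWN()
 * tags so a leaked busy page can be traced to its call site.  UVM_PAGE_OWN()
 * compiles away unless the kernel is built with UVM_PAGE_TRKOWN.
 */
#if 0
static void
example_busy_window(struct vm_page *pg)
{

	/* owner's lock is held; pg is not currently busy */
	pg->flags |= PG_BUSY;
	UVM_PAGE_OWN(pg, "example_busy_window");

	/* ... operate on the busy page ... */

	/* real code must first wake any PG_WANTED waiters */
	pg->flags &= ~PG_BUSY;
	UVM_PAGE_OWN(pg, NULL);
}
#endif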
   1752 
   1753 /*
   1754  * uvm_pageidlezero: zero free pages while the system is idle.
   1755  */
   1756 void
   1757 uvm_pageidlezero(void)
   1758 {
   1759 
   1760 	/*
   1761 	 * Disabled for the moment; the previous strategy was too cache-heavy.  In
   1762 	 * the future we may experiment with zeroing the pages held in the
   1763 	 * per-CPU cache (uvm_pgflcache).
   1764 	 */
   1765 }
   1766 
   1767 /*
   1768  * uvm_pagelookup: look up a page
   1769  *
   1770  * => caller should lock object to keep someone from pulling the page
   1771  *	out from under it
   1772  */
   1773 
   1774 struct vm_page *
   1775 uvm_pagelookup(struct uvm_object *obj, voff_t off)
   1776 {
   1777 	struct vm_page *pg;
   1778 
   1779 	/* No - used from DDB. KASSERT(mutex_owned(obj->vmobjlock)); */
   1780 
   1781 	pg = radix_tree_lookup_node(&obj->uo_pages, off >> PAGE_SHIFT);
   1782 
   1783 	KASSERT(pg == NULL || obj->uo_npages != 0);
   1784 	KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1785 		(pg->flags & PG_BUSY) != 0);
   1786 	return pg;
   1787 }
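
/*
 * Example (a minimal sketch; the helper is hypothetical): testing whether
 * an offset in an object currently has a resident page.  Note the answer
 * is stale as soon as the lock is dropped.
 */
#if 0
static bool
example_resident_p(struct uvm_object *uobj, voff_t off)
{
	struct vm_page *pg;

	mutex_enter(uobj->vmobjlock);
	pg = uvm_pagelookup(uobj, trunc_page(off));
	mutex_exit(uobj->vmobjlock);
	return pg != NULL;
}
#endif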
   1788 
   1789 /*
   1790  * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
   1791  *
   1792  * => caller must lock objects
   1793  * => caller must hold pg->interlock
   1794  */
   1795 
   1796 void
   1797 uvm_pagewire(struct vm_page *pg)
   1798 {
   1799 
   1800 	KASSERT(uvm_page_owner_locked_p(pg));
   1801 	KASSERT(mutex_owned(&pg->interlock));
   1802 #if defined(READAHEAD_STATS)
   1803 	if ((pg->flags & PG_READAHEAD) != 0) {
   1804 		uvm_ra_hit.ev_count++;
   1805 		pg->flags &= ~PG_READAHEAD;
   1806 	}
   1807 #endif /* defined(READAHEAD_STATS) */
   1808 	if (pg->wire_count == 0) {
   1809 		uvm_pagedequeue(pg);
   1810 		atomic_inc_uint(&uvmexp.wired);
   1811 	}
   1812 	pg->wire_count++;
   1813 	KASSERT(pg->wire_count > 0);	/* detect wraparound */
   1814 }
   1815 
   1816 /*
   1817  * uvm_pageunwire: unwire the page.
   1818  *
   1819  * => activate if wire count goes to zero.
   1820  * => caller must lock objects
   1821  * => caller must hold pg->interlock
   1822  */
   1823 
   1824 void
   1825 uvm_pageunwire(struct vm_page *pg)
   1826 {
   1827 
   1828 	KASSERT(uvm_page_owner_locked_p(pg));
   1829 	KASSERT(pg->wire_count != 0);
   1830 	KASSERT(!uvmpdpol_pageisqueued_p(pg));
   1831 	KASSERT(mutex_owned(&pg->interlock));
   1832 	pg->wire_count--;
   1833 	if (pg->wire_count == 0) {
   1834 		uvm_pageactivate(pg);
   1835 		KASSERT(uvmexp.wired != 0);
   1836 		atomic_dec_uint(&uvmexp.wired);
   1837 	}
   1838 }
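
/*
 * Example (sketch, hypothetical helper): wiring a page across an operation
 * so the pagedaemon leaves it alone, then unwiring it.  The owner's lock
 * is assumed held throughout; the interlock is taken around each call.
 */
#if 0
static void
example_wire_window(struct vm_page *pg)
{

	uvm_pagelock(pg);
	uvm_pagewire(pg);	/* dequeues on the 0 -> 1 transition */
	uvm_pageunlock(pg);

	/* ... pg cannot be paged out here ... */

	uvm_pagelock(pg);
	uvm_pageunwire(pg);	/* reactivates on the 1 -> 0 transition */
	uvm_pageunlock(pg);
}
#endif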
   1839 
   1840 /*
   1841  * uvm_pagedeactivate: deactivate page
   1842  *
   1843  * => caller must lock objects
   1844  * => caller must check to make sure page is not wired
   1845  * => object that page belongs to must be locked (so we can adjust pg->flags)
   1846  * => caller must clear the reference on the page before calling
   1847  * => caller must hold pg->interlock
   1848  */
   1849 
   1850 void
   1851 uvm_pagedeactivate(struct vm_page *pg)
   1852 {
   1853 
   1854 	KASSERT(uvm_page_owner_locked_p(pg));
   1855 	KASSERT(mutex_owned(&pg->interlock));
   1856 	if (pg->wire_count == 0) {
   1857 		KASSERT(uvmpdpol_pageisqueued_p(pg));
   1858 		uvmpdpol_pagedeactivate(pg);
   1859 	}
   1860 }
   1861 
   1862 /*
   1863  * uvm_pageactivate: activate page
   1864  *
   1865  * => caller must lock objects
   1866  * => caller must hold pg->interlock
   1867  */
   1868 
   1869 void
   1870 uvm_pageactivate(struct vm_page *pg)
   1871 {
   1872 
   1873 	KASSERT(uvm_page_owner_locked_p(pg));
   1874 	KASSERT(mutex_owned(&pg->interlock));
   1875 #if defined(READAHEAD_STATS)
   1876 	if ((pg->flags & PG_READAHEAD) != 0) {
   1877 		uvm_ra_hit.ev_count++;
   1878 		pg->flags &= ~PG_READAHEAD;
   1879 	}
   1880 #endif /* defined(READAHEAD_STATS) */
   1881 	if (pg->wire_count == 0) {
   1882 		uvmpdpol_pageactivate(pg);
   1883 	}
   1884 }
   1885 
   1886 /*
   1887  * uvm_pagedequeue: remove a page from any paging queue
   1888  *
   1889  * => caller must lock objects
   1890  * => caller must hold pg->interlock
   1891  */
   1892 void
   1893 uvm_pagedequeue(struct vm_page *pg)
   1894 {
   1895 
   1896 	KASSERT(uvm_page_owner_locked_p(pg));
   1897 	KASSERT(mutex_owned(&pg->interlock));
   1898 	if (uvmpdpol_pageisqueued_p(pg)) {
   1899 		uvmpdpol_pagedequeue(pg);
   1900 	}
   1901 }
   1902 
   1903 /*
   1904  * uvm_pageenqueue: add a page to a paging queue without activating.
   1905  * used where a page is not really demanded (yet), e.g. read-ahead.
   1906  *
   1907  * => caller must lock objects
   1908  * => caller must hold pg->interlock
   1909  */
   1910 void
   1911 uvm_pageenqueue(struct vm_page *pg)
   1912 {
   1913 
   1914 	KASSERT(uvm_page_owner_locked_p(pg));
   1915 	KASSERT(mutex_owned(&pg->interlock));
   1916 	if (pg->wire_count == 0 && !uvmpdpol_pageisqueued_p(pg)) {
   1917 		uvmpdpol_pageenqueue(pg);
   1918 	}
   1919 }
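
/*
 * Example (sketch, hypothetical helper): read-ahead completion enqueues
 * a speculatively loaded page without activating it, so an untouched page
 * stays cheap to reclaim.
 */
#if 0
static void
example_readahead_done(struct vm_page *pg)
{

	/* owner's lock held */
	uvm_pagelock(pg);
	uvm_pageenqueue(pg);	/* queued, but not marked active */
	uvm_pageunlock(pg);
}
#endif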
   1920 
   1921 /*
   1922  * uvm_pagelock: acquire page interlock
   1923  */
   1924 void
   1925 uvm_pagelock(struct vm_page *pg)
   1926 {
   1927 
   1928 	mutex_enter(&pg->interlock);
   1929 }
   1930 
   1931 /*
   1932  * uvm_pagelock2: acquire two page interlocks
   1933  */
   1934 void
   1935 uvm_pagelock2(struct vm_page *pg1, struct vm_page *pg2)
   1936 {
   1937 
   1938 	if (pg1 < pg2) {
   1939 		mutex_enter(&pg1->interlock);
   1940 		mutex_enter(&pg2->interlock);
   1941 	} else {
   1942 		mutex_enter(&pg2->interlock);
   1943 		mutex_enter(&pg1->interlock);
   1944 	}
   1945 }
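
/*
 * Taking the interlocks in address order means any two threads locking the
 * same pair of pages agree on the order and so cannot deadlock.  Example
 * (sketch, hypothetical helper; the two pages must be distinct):
 */
#if 0
static void
example_touch_pair(struct vm_page *pg1, struct vm_page *pg2)
{

	uvm_pagelock2(pg1, pg2);	/* safe for any pg1 != pg2 */
	/* ... operate on both pages' queue state together ... */
	uvm_pageunlock2(pg1, pg2);
}
#endif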
   1946 
   1947 /*
   1948  * uvm_pageunlock: release page interlock, and if a page replacement intent
   1949  * is set on the page, pass it to uvmpdpol to make real.
   1950  *
   1951  * => caller must hold pg->interlock
   1952  */
   1953 void
   1954 uvm_pageunlock(struct vm_page *pg)
   1955 {
   1956 
   1957 	if ((pg->pqflags & PQ_INTENT_SET) == 0 ||
   1958 	    (pg->pqflags & PQ_INTENT_QUEUED) != 0) {
   1959 		mutex_exit(&pg->interlock);
   1960 		return;
   1961 	}
   1962 	pg->pqflags |= PQ_INTENT_QUEUED;
   1963 	mutex_exit(&pg->interlock);
   1964 	uvmpdpol_pagerealize(pg);
   1965 }
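
/*
 * Example (sketch, hypothetical helper): the usual shape of a queue
 * operation.  The deactivation is recorded as an intent under the
 * interlock; uvm_pageunlock() then hands it to uvmpdpol to make real.
 */
#if 0
static void
example_deactivate(struct vm_page *pg)
{

	/* owner's lock held; pg is unwired and on a paging queue */
	uvm_pagelock(pg);
	uvm_pagedeactivate(pg);	/* records the intent */
	uvm_pageunlock(pg);	/* realizes it via uvmpdpol_pagerealize() */
}
#endif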
   1966 
   1967 /*
   1968  * uvm_pageunlock2: release two page interlocks and, for each page that
   1969  * has a page replacement intent set, pass the intent to uvmpdpol to
   1970  * make real.
   1971  *
   1972  * => caller must hold the interlock of both pages
   1973  */
   1974 void
   1975 uvm_pageunlock2(struct vm_page *pg1, struct vm_page *pg2)
   1976 {
   1977 
   1978 	if ((pg1->pqflags & PQ_INTENT_SET) == 0 ||
   1979 	    (pg1->pqflags & PQ_INTENT_QUEUED) != 0) {
   1980 		mutex_exit(&pg1->interlock);
   1981 		pg1 = NULL;
   1982 	} else {
   1983 		pg1->pqflags |= PQ_INTENT_QUEUED;
   1984 		mutex_exit(&pg1->interlock);
   1985 	}
   1986 
   1987 	if ((pg2->pqflags & PQ_INTENT_SET) == 0 ||
   1988 	    (pg2->pqflags & PQ_INTENT_QUEUED) != 0) {
   1989 		mutex_exit(&pg2->interlock);
   1990 		pg2 = NULL;
   1991 	} else {
   1992 		pg2->pqflags |= PQ_INTENT_QUEUED;
   1993 		mutex_exit(&pg2->interlock);
   1994 	}
   1995 
   1996 	if (pg1 != NULL) {
   1997 		uvmpdpol_pagerealize(pg1);
   1998 	}
   1999 	if (pg2 != NULL) {
   2000 		uvmpdpol_pagerealize(pg2);
   2001 	}
   2002 }
   2003 
   2004 /*
   2005  * uvm_pagezero: zero fill a page
   2006  *
   2007  * => if page is part of an object then the object should be locked
   2008  *	to protect pg->flags.
   2009  */
   2010 
   2011 void
   2012 uvm_pagezero(struct vm_page *pg)
   2013 {
   2014 
   2015 	uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
   2016 	pmap_zero_page(VM_PAGE_TO_PHYS(pg));
   2017 }
   2018 
   2019 /*
   2020  * uvm_pagecopy: copy a page
   2021  *
   2022  * => if page is part of an object then the object should be locked
   2023  *	to protect pg->flags.
   2024  */
   2025 
   2026 void
   2027 uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
   2028 {
   2029 
   2030 	uvm_pagemarkdirty(dst, UVM_PAGE_STATUS_DIRTY);
   2031 	pmap_copy_page(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
   2032 }
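
/*
 * Example (a minimal sketch with hypothetical context): the classic
 * allocate-and-copy step, as in copy-on-write or breaking a loan.
 * Error handling and the source page's locking are elided.
 */
#if 0
static struct vm_page *
example_copy_page(struct vm_page *src)
{
	struct vm_page *dst;

	dst = uvm_pagealloc(NULL, 0, NULL, 0);	/* unmanaged, for brevity */
	if (dst != NULL)
		uvm_pagecopy(src, dst);	/* marks dst dirty, then copies */
	return dst;
}
#endif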
   2033 
   2034 /*
   2035  * uvm_pageismanaged: test whether a page (specified by PA) is managed.
   2036  */
   2037 
   2038 bool
   2039 uvm_pageismanaged(paddr_t pa)
   2040 {
   2041 
   2042 	return (uvm_physseg_find(atop(pa), NULL) != UVM_PHYSSEG_TYPE_INVALID);
   2043 }
   2044 
   2045 /*
   2046  * uvm_page_lookup_freelist: look up the free list for the specified page
   2047  */
   2048 
   2049 int
   2050 uvm_page_lookup_freelist(struct vm_page *pg)
   2051 {
   2052 	uvm_physseg_t upm;
   2053 
   2054 	upm = uvm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
   2055 	KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID);
   2056 	return uvm_physseg_get_free_list(upm);
   2057 }
   2058 
   2059 /*
   2060  * uvm_page_owner_locked_p: return true if object associated with page is
   2061  * locked.  this is a weak check for runtime assertions only.
   2062  */
   2063 
   2064 bool
   2065 uvm_page_owner_locked_p(struct vm_page *pg)
   2066 {
   2067 
   2068 	if (pg->uobject != NULL) {
   2069 		return mutex_owned(pg->uobject->vmobjlock);
   2070 	}
   2071 	if (pg->uanon != NULL) {
   2072 		return mutex_owned(pg->uanon->an_lock);
   2073 	}
   2074 	return true;
   2075 }
   2076 
   2077 /*
   2078  * uvm_pagereadonly_p: return true if the page should be mapped read-only
   2079  */
   2080 
   2081 bool
   2082 uvm_pagereadonly_p(struct vm_page *pg)
   2083 {
   2084 	struct uvm_object * const uobj = pg->uobject;
   2085 
   2086 	KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));
   2087 	KASSERT(uobj != NULL || mutex_owned(pg->uanon->an_lock));
   2088 	if ((pg->flags & PG_RDONLY) != 0) {
   2089 		return true;
   2090 	}
   2091 	if (uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
   2092 		return true;
   2093 	}
   2094 	if (uobj == NULL) {
   2095 		return false;
   2096 	}
   2097 	return UVM_OBJ_NEEDS_WRITEFAULT(uobj);
   2098 }
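
/*
 * Example (sketch, a simplified fragment of a hypothetical fault handler):
 * enter the mapping writable only when the page may be written right away,
 * so clean pages still take an initial write fault and get marked dirty.
 */
#if 0
static int
example_enter(struct pmap *pmap, vaddr_t va, struct vm_page *pg)
{
	vm_prot_t prot = VM_PROT_READ;

	/* owner's lock is held, per the KASSERTs above */
	if (!uvm_pagereadonly_p(pg))
		prot |= VM_PROT_WRITE;
	return pmap_enter(pmap, va, VM_PAGE_TO_PHYS(pg), prot,
	    prot | PMAP_CANFAIL);
}
#endif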
   2099 
   2100 #ifdef PMAP_DIRECT
   2101 /*
   2102  * Call pmap to translate a physical address into a virtual one and run a
   2103  * callback on it.  Used to avoid actually mapping the pages; the pmap most
   2104  * likely uses a direct map or equivalent.
   2105  */
   2106 int
   2107 uvm_direct_process(struct vm_page **pgs, u_int npages, voff_t off, vsize_t len,
   2108             int (*process)(void *, size_t, void *), void *arg)
   2109 {
   2110 	int error = 0;
   2111 	paddr_t pa;
   2112 	size_t todo;
   2113 	voff_t pgoff = (off & PAGE_MASK);
   2114 	struct vm_page *pg;
   2115 
   2116 	KASSERT(npages > 0 && len > 0);
   2117 
   2118 	for (int i = 0; i < npages; i++) {
   2119 		pg = pgs[i];
   2120 
   2121 		KASSERT(len > 0);
   2122 
   2123 		/*
   2124 		 * Caller is responsible for ensuring all the pages are
   2125 		 * available.
   2126 		 */
   2127 		KASSERT(pg != NULL && pg != PGO_DONTCARE);
   2128 
   2129 		pa = VM_PAGE_TO_PHYS(pg);
   2130 		todo = MIN(len, PAGE_SIZE - pgoff);
   2131 
   2132 		error = pmap_direct_process(pa, pgoff, todo, process, arg);
   2133 		if (error)
   2134 			break;
   2135 
   2136 		pgoff = 0;
   2137 		len -= todo;
   2138 	}
   2139 
   2140 	KASSERTMSG(error != 0 || len == 0, "len %lu != 0 for non-error", len);
   2141 	return error;
   2142 }
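
/*
 * Example (a minimal sketch): a process callback that checksums page
 * contents without mapping the pages into KVA.  The callback and its
 * context structure are hypothetical.
 */
#if 0
struct example_sum {
	uint32_t val;
};

static int
example_sum_cb(void *buf, size_t len, void *arg)
{
	struct example_sum *sum = arg;
	const uint8_t *p = buf;

	while (len--)
		sum->val += *p++;
	return 0;	/* non-zero would abort uvm_direct_process() */
}
#endif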
   2143 #endif /* PMAP_DIRECT */
   2144 
   2145 #if defined(DDB) || defined(DEBUGPRINT)
   2146 
   2147 /*
   2148  * uvm_page_printit: actually print the page
   2149  */
   2150 
   2151 static const char page_flagbits[] = UVM_PGFLAGBITS;
   2152 static const char page_pqflagbits[] = UVM_PQFLAGBITS;
   2153 
   2154 void
   2155 uvm_page_printit(struct vm_page *pg, bool full,
   2156     void (*pr)(const char *, ...))
   2157 {
   2158 	struct vm_page *tpg;
   2159 	struct uvm_object *uobj;
   2160 	struct pgflbucket *pgb;
   2161 	struct pgflist *pgl;
   2162 	char pgbuf[128];
   2163 
   2164 	(*pr)("PAGE %p:\n", pg);
   2165 	snprintb(pgbuf, sizeof(pgbuf), page_flagbits, pg->flags);
   2166 	(*pr)("  flags=%s\n", pgbuf);
   2167 	snprintb(pgbuf, sizeof(pgbuf), page_pqflagbits, pg->pqflags);
   2168 	(*pr)("  pqflags=%s\n", pgbuf);
   2169 	(*pr)("  uobject=%p, uanon=%p, offset=0x%llx\n",
   2170 	    pg->uobject, pg->uanon, (long long)pg->offset);
   2171 	(*pr)("  loan_count=%d wire_count=%d bucket=%d freelist=%d\n",
   2172 	    pg->loan_count, pg->wire_count, uvm_page_get_bucket(pg),
   2173 	    uvm_page_get_freelist(pg));
   2174 	(*pr)("  pa=0x%lx\n", (long)VM_PAGE_TO_PHYS(pg));
   2175 #if defined(UVM_PAGE_TRKOWN)
   2176 	if (pg->flags & PG_BUSY)
   2177 		(*pr)("  owning process = %d, tag=%s\n",
   2178 		    pg->owner, pg->owner_tag);
   2179 	else
   2180 		(*pr)("  page not busy, no owner\n");
   2181 #else
   2182 	(*pr)("  [page ownership tracking disabled]\n");
   2183 #endif
   2184 
   2185 	if (!full)
   2186 		return;
   2187 
   2188 	/* cross-verify object/anon */
   2189 	if ((pg->flags & PG_FREE) == 0) {
   2190 		if (pg->flags & PG_ANON) {
   2191 			if (pg->uanon == NULL || pg->uanon->an_page != pg)
   2192 			    (*pr)("  >>> ANON DOES NOT POINT HERE <<< (%p)\n",
   2193 				(pg->uanon) ? pg->uanon->an_page : NULL);
   2194 			else
   2195 				(*pr)("  anon backpointer is OK\n");
   2196 		} else {
   2197 			uobj = pg->uobject;
   2198 			if (uobj) {
   2199 				(*pr)("  checking object list\n");
   2200 				tpg = uvm_pagelookup(uobj, pg->offset);
   2201 				if (tpg)
   2202 					(*pr)("  page found on object list\n");
   2203 				else
   2204 			(*pr)("  >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
   2205 			}
   2206 		}
   2207 	}
   2208 
   2209 	/* cross-verify page queue */
   2210 	if (pg->flags & PG_FREE) {
   2211 		int fl = uvm_page_get_freelist(pg);
   2212 		int b = uvm_page_get_bucket(pg);
   2213 		pgb = uvm.page_free[fl].pgfl_buckets[b];
   2214 		pgl = &pgb->pgb_colors[VM_PGCOLOR(pg)];
   2215 		(*pr)("  checking pageq list\n");
   2216 		LIST_FOREACH(tpg, pgl, pageq.list) {
   2217 			if (tpg == pg) {
   2218 				break;
   2219 			}
   2220 		}
   2221 		if (tpg)
   2222 			(*pr)("  page found on pageq list\n");
   2223 		else
   2224 			(*pr)("  >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
   2225 	}
   2226 }
   2227 
   2228 /*
   2229  * uvm_page_printall - print a summary of all managed pages
   2230  */
   2231 
   2232 void
   2233 uvm_page_printall(void (*pr)(const char *, ...))
   2234 {
   2235 	uvm_physseg_t i;
   2236 	paddr_t pfn;
   2237 	struct vm_page *pg;
   2238 
   2239 	(*pr)("%18s %4s %4s %18s %18s"
   2240 #ifdef UVM_PAGE_TRKOWN
   2241 	    " OWNER"
   2242 #endif
   2243 	    "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
   2244 	for (i = uvm_physseg_get_first();
   2245 	     uvm_physseg_valid_p(i);
   2246 	     i = uvm_physseg_get_next(i)) {
   2247 		for (pfn = uvm_physseg_get_start(i);
   2248 		     pfn < uvm_physseg_get_end(i);
   2249 		     pfn++) {
   2250 			pg = PHYS_TO_VM_PAGE(ptoa(pfn));
   2251 
   2252 			(*pr)("%18p %04x %08x %18p %18p",
   2253 			    pg, pg->flags, pg->pqflags, pg->uobject,
   2254 			    pg->uanon);
   2255 #ifdef UVM_PAGE_TRKOWN
   2256 			if (pg->flags & PG_BUSY)
   2257 				(*pr)(" %d [%s]", pg->owner, pg->owner_tag);
   2258 #endif
   2259 			(*pr)("\n");
   2260 		}
   2261 	}
   2262 }
   2263 
   2264 /*
   2265  * uvm_page_print_freelists - print a summary of the freelists
   2266  */
   2267 
   2268 void
   2269 uvm_page_print_freelists(void (*pr)(const char *, ...))
   2270 {
   2271 	struct pgfreelist *pgfl;
   2272 	struct pgflbucket *pgb;
   2273 	int fl, b, c;
   2274 
   2275 	(*pr)("There are %d freelists with %d buckets of %d colors.\n\n",
   2276 	    VM_NFREELIST, uvm.bucketcount, uvmexp.ncolors);
   2277 
   2278 	for (fl = 0; fl < VM_NFREELIST; fl++) {
   2279 		pgfl = &uvm.page_free[fl];
   2280 		(*pr)("freelist(%d) @ %p\n", fl, pgfl);
   2281 		for (b = 0; b < uvm.bucketcount; b++) {
   2282 			pgb = uvm.page_free[fl].pgfl_buckets[b];
   2283 			(*pr)("    bucket(%d) @ %p, nfree = %d, lock @ %p:\n",
   2284 			    b, pgb, pgb->pgb_nfree,
   2285 			    &uvm_freelist_locks[b].lock);
   2286 			for (c = 0; c < uvmexp.ncolors; c++) {
   2287 				(*pr)("        color(%d) @ %p, ", c,
   2288 				    &pgb->pgb_colors[c]);
   2289 				(*pr)("first page = %p\n",
   2290 				    LIST_FIRST(&pgb->pgb_colors[c]));
   2291 			}
   2292 		}
   2293 	}
   2294 }
   2295 
   2296 #endif /* DDB || DEBUGPRINT */
   2297