      1 /*	$NetBSD: pmap.c,v 1.18 2016/07/14 05:00:51 skrll Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center and by Chris G. Demetriou.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 /*
     34  * Copyright (c) 1992, 1993
     35  *	The Regents of the University of California.  All rights reserved.
     36  *
     37  * This code is derived from software contributed to Berkeley by
     38  * the Systems Programming Group of the University of Utah Computer
     39  * Science Department and Ralph Campbell.
     40  *
     41  * Redistribution and use in source and binary forms, with or without
     42  * modification, are permitted provided that the following conditions
     43  * are met:
     44  * 1. Redistributions of source code must retain the above copyright
     45  *    notice, this list of conditions and the following disclaimer.
     46  * 2. Redistributions in binary form must reproduce the above copyright
     47  *    notice, this list of conditions and the following disclaimer in the
     48  *    documentation and/or other materials provided with the distribution.
     49  * 3. Neither the name of the University nor the names of its contributors
     50  *    may be used to endorse or promote products derived from this software
     51  *    without specific prior written permission.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     63  * SUCH DAMAGE.
     64  *
     65  *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
     66  */
     67 
     68 #include <sys/cdefs.h>
     69 
     70 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.18 2016/07/14 05:00:51 skrll Exp $");
     71 
     72 /*
     73  *	Manages physical address maps.
     74  *
     75  *	In addition to hardware address maps, this
     76  *	module is called upon to provide software-use-only
     77  *	maps which may or may not be stored in the same
     78  *	form as hardware maps.  These pseudo-maps are
     79  *	used to store intermediate results from copy
     80  *	operations to and from address spaces.
     81  *
     82  *	Since the information managed by this module is
     83  *	also stored by the logical address mapping module,
     84  *	this module may throw away valid virtual-to-physical
     85  *	mappings at almost any time.  However, invalidations
     86  *	of virtual-to-physical mappings must be done as
     87  *	requested.
     88  *
     89  *	In order to cope with hardware architectures which
      90  * make virtual-to-physical map invalidations expensive,
      91  * this module may delay invalidation or protection-
      92  * reduction operations until such time as they are actually
     93  *	necessary.  This module is given full information as
     94  *	to which processors are currently using which maps,
     95  *	and to when physical maps must be made correct.
     96  */
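
         /*
          * Illustrative sketch only (not compiled here; va and pa are
          * made-up values): the basic lifecycle this module implements,
          * using only functions defined below.
          *
          *	pmap_t pm = pmap_create();
          *	(void)pmap_enter(pm, va, pa, VM_PROT_READ, 0);
          *	pmap_update(pm);		commit deferred work
          *	pmap_remove(pm, va, va + PAGE_SIZE);
          *	pmap_update(pm);
          *	pmap_destroy(pm);		drop the last reference
          */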
     97 
     98 #include "opt_modular.h"
     99 #include "opt_multiprocessor.h"
    100 #include "opt_sysv.h"
    101 
    102 #define __PMAP_PRIVATE
    103 
    104 #include <sys/param.h>
    105 #include <sys/atomic.h>
    106 #include <sys/buf.h>
    107 #include <sys/cpu.h>
    108 #include <sys/mutex.h>
    109 #include <sys/pool.h>
    113 
    114 #include <uvm/uvm.h>
    115 
    116 #if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \
    117     && !defined(PMAP_NO_PV_UNCACHED)
    118 #error PMAP_VIRTUAL_CACHE_ALIASES with MULTIPROCESSOR requires \
    119  PMAP_NO_PV_UNCACHED to be defined
    120 #endif
    121 
    122 PMAP_COUNTER(remove_kernel_calls, "remove kernel calls");
    123 PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped");
    124 PMAP_COUNTER(remove_user_calls, "remove user calls");
    125 PMAP_COUNTER(remove_user_pages, "user pages unmapped");
    126 PMAP_COUNTER(remove_flushes, "remove cache flushes");
    127 PMAP_COUNTER(remove_tlb_ops, "remove tlb ops");
    128 PMAP_COUNTER(remove_pvfirst, "remove pv first");
    129 PMAP_COUNTER(remove_pvsearch, "remove pv search");
    130 
    131 PMAP_COUNTER(prefer_requests, "prefer requests");
    132 PMAP_COUNTER(prefer_adjustments, "prefer adjustments");
    133 
    134 PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
    135 
    136 PMAP_COUNTER(kenter_pa, "kernel fast mapped pages");
    137 PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)");
    138 PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages");
    139 PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages");
    140 
    141 PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable");
    142 PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable");
    143 
    144 PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)");
    145 PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)");
    146 PMAP_COUNTER(kernel_mappings, "kernel pages mapped");
    147 PMAP_COUNTER(user_mappings, "user pages mapped");
    148 PMAP_COUNTER(user_mappings_changed, "user mapping changed");
    149 PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed");
    150 PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
    151 PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
    152 PMAP_COUNTER(managed_mappings, "managed pages mapped");
    153 PMAP_COUNTER(mappings, "pages mapped");
    154 PMAP_COUNTER(remappings, "pages remapped");
    155 PMAP_COUNTER(unmappings, "pages unmapped");
    156 PMAP_COUNTER(primary_mappings, "page initial mappings");
    157 PMAP_COUNTER(primary_unmappings, "page final unmappings");
    158 PMAP_COUNTER(tlb_hit, "page mapping");
    159 
    160 PMAP_COUNTER(exec_mappings, "exec pages mapped");
    161 PMAP_COUNTER(exec_synced_mappings, "exec pages synced");
    162 PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)");
    163 PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)");
    164 PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)");
    165 PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)");
    166 PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)");
    167 PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)");
    168 PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)");
    169 PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)");
    170 PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)");
    171 
    172 PMAP_COUNTER(create, "creates");
    173 PMAP_COUNTER(reference, "references");
    174 PMAP_COUNTER(dereference, "dereferences");
    175 PMAP_COUNTER(destroy, "destroyed");
    176 PMAP_COUNTER(activate, "activations");
    177 PMAP_COUNTER(deactivate, "deactivations");
    178 PMAP_COUNTER(update, "updates");
    179 #ifdef MULTIPROCESSOR
    180 PMAP_COUNTER(shootdown_ipis, "shootdown IPIs");
    181 #endif
    182 PMAP_COUNTER(unwire, "unwires");
    183 PMAP_COUNTER(copy, "copies");
    184 PMAP_COUNTER(clear_modify, "clear_modifies");
    185 PMAP_COUNTER(protect, "protects");
    186 PMAP_COUNTER(page_protect, "page_protects");
    187 
    188 #define PMAP_ASID_RESERVED 0
    189 CTASSERT(PMAP_ASID_RESERVED == 0);
    190 
    191 #ifndef PMAP_SEGTAB_ALIGN
    192 #define PMAP_SEGTAB_ALIGN	/* nothing */
    193 #endif
    194 #ifdef _LP64
    195 pmap_segtab_t	pmap_kstart_segtab PMAP_SEGTAB_ALIGN; /* first mid-level segtab for kernel */
    196 #endif
    197 pmap_segtab_t	pmap_kern_segtab PMAP_SEGTAB_ALIGN = { /* top level segtab for kernel */
    198 #ifdef _LP64
    199 	.seg_seg[(VM_MIN_KERNEL_ADDRESS & XSEGOFSET) >> SEGSHIFT] = &pmap_kstart_segtab,
    200 #endif
    201 };
    202 
    203 struct pmap_kernel kernel_pmap_store = {
    204 	.kernel_pmap = {
    205 		.pm_count = 1,
    206 		.pm_segtab = &pmap_kern_segtab,
    207 		.pm_minaddr = VM_MIN_KERNEL_ADDRESS,
    208 		.pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
    209 	},
    210 };
    211 
    212 struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;
    213 
    214 struct pmap_limits pmap_limits = {	/* VA and PA limits */
    215 	.virtual_start = VM_MIN_KERNEL_ADDRESS,
    216 };
    217 
    218 #ifdef UVMHIST
    219 static struct kern_history_ent pmapexechistbuf[10000];
    220 static struct kern_history_ent pmaphistbuf[10000];
    221 UVMHIST_DEFINE(pmapexechist);
    222 UVMHIST_DEFINE(pmaphist);
    223 #endif
    224 
    225 /*
    226  * The pools from which pmap structures and sub-structures are allocated.
    227  */
    228 struct pool pmap_pmap_pool;
    229 struct pool pmap_pv_pool;
    230 
    231 #ifndef PMAP_PV_LOWAT
    232 #define	PMAP_PV_LOWAT	16
    233 #endif
    234 int	pmap_pv_lowat = PMAP_PV_LOWAT;
    235 
    236 bool	pmap_initialized = false;
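         /*
          * Addresses a and b have an acceptable color when they agree in
          * the bits that index a virtually indexed cache, i.e. they fall
          * in the same cache alias class.
          */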
    237 #define	PMAP_PAGE_COLOROK_P(a, b) \
    238 		((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0)
    239 u_int	pmap_page_colormask;
    240 
    241 #define PAGE_IS_MANAGED(pa)	(pmap_initialized && uvm_pageismanaged(pa))
    242 
    243 #define PMAP_IS_ACTIVE(pm)						\
    244 	((pm) == pmap_kernel() || 					\
    245 	 (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)
    246 
    247 /* Forward function declarations */
    248 void pmap_page_remove(struct vm_page *);
    249 static void pmap_pvlist_check(struct vm_page_md *);
    250 void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
    251 void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, pt_entry_t *, u_int);
    252 
    253 /*
    254  * PV table management functions.
    255  */
    256 void	*pmap_pv_page_alloc(struct pool *, int);
    257 void	pmap_pv_page_free(struct pool *, void *);
    258 
    259 struct pool_allocator pmap_pv_page_allocator = {
    260 	pmap_pv_page_alloc, pmap_pv_page_free, 0,
    261 };
    262 
    263 #define	pmap_pv_alloc()		pool_get(&pmap_pv_pool, PR_NOWAIT)
    264 #define	pmap_pv_free(pv)	pool_put(&pmap_pv_pool, (pv))
    265 
    266 #if !defined(MULTIPROCESSOR) || !defined(PMAP_MD_NEED_TLB_MISS_LOCK)
    267 #define	pmap_md_tlb_miss_lock_enter()	do { } while(/*CONSTCOND*/0)
    268 #define	pmap_md_tlb_miss_lock_exit()	do { } while(/*CONSTCOND*/0)
    269 #endif /* !MULTIPROCESSOR || !PMAP_MD_NEED_TLB_MISS_LOCK */
    270 
    271 #ifndef MULTIPROCESSOR
    272 kmutex_t pmap_pvlist_mutex	__cacheline_aligned;
    273 #endif
    274 
    275 /*
    276  * Debug functions.
    277  */
    278 
    279 static inline void
    280 pmap_asid_check(pmap_t pm, const char *func)
    281 {
    282 #ifdef DEBUG
    283 	if (!PMAP_IS_ACTIVE(pm))
    284 		return;
    285 
    286 	struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(curcpu()));
    287 	tlb_asid_t asid = tlb_get_asid();
    288 	if (asid != pai->pai_asid)
    289 		panic("%s: inconsistency for active TLB update: %u <-> %u",
    290 		    func, asid, pai->pai_asid);
    291 #endif
    292 }
    293 
    294 static void
    295 pmap_addr_range_check(pmap_t pmap, vaddr_t sva, vaddr_t eva, const char *func)
    296 {
    297 #ifdef DEBUG
    298 	if (pmap == pmap_kernel()) {
    299 		if (sva < VM_MIN_KERNEL_ADDRESS)
    300 			panic("%s: kva %#"PRIxVADDR" not in range",
    301 			    func, sva);
    302 		if (eva >= pmap_limits.virtual_end)
    303 			panic("%s: kva %#"PRIxVADDR" not in range",
    304 			    func, eva);
    305 	} else {
    306 		if (eva > VM_MAXUSER_ADDRESS)
    307 			panic("%s: uva %#"PRIxVADDR" not in range",
    308 			    func, eva);
    309 		pmap_asid_check(pmap, func);
    310 	}
    311 #endif
    312 }
    313 
    314 /*
    315  * Misc. functions.
    316  */
    317 
    318 bool
    319 pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes)
    320 {
    321 	volatile unsigned long * const attrp = &mdpg->mdpg_attrs;
    322 #ifdef MULTIPROCESSOR
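         	/*
         	 * Lock-free update: retry the compare-and-swap until it
         	 * succeeds, so concurrent updates of mdpg_attrs are never
         	 * lost; bail out early if none of the requested bits are set.
         	 */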
    323 	for (;;) {
    324 		u_int old_attr = *attrp;
    325 		if ((old_attr & clear_attributes) == 0)
    326 			return false;
    327 		u_int new_attr = old_attr & ~clear_attributes;
    328 		if (old_attr == atomic_cas_ulong(attrp, old_attr, new_attr))
    329 			return true;
    330 	}
    331 #else
    332 	unsigned long old_attr = *attrp;
    333 	if ((old_attr & clear_attributes) == 0)
    334 		return false;
    335 	*attrp &= ~clear_attributes;
    336 	return true;
    337 #endif
    338 }
    339 
    340 void
    341 pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes)
    342 {
    343 #ifdef MULTIPROCESSOR
    344 	atomic_or_ulong(&mdpg->mdpg_attrs, set_attributes);
    345 #else
    346 	mdpg->mdpg_attrs |= set_attributes;
    347 #endif
    348 }
    349 
    350 static void
    351 pmap_page_syncicache(struct vm_page *pg)
    352 {
    353 #ifndef MULTIPROCESSOR
    354 	struct pmap * const curpmap = curlwp->l_proc->p_vmspace->vm_map.pmap;
    355 #endif
    356 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
    357 	pv_entry_t pv = &mdpg->mdpg_first;
    358 	kcpuset_t *onproc;
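         	/*
         	 * onproc accumulates the set of CPUs on which some pmap
         	 * mapping this page is currently running; the MD syncicache
         	 * call below only needs to reach those CPUs.
         	 */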
    359 #ifdef MULTIPROCESSOR
    360 	kcpuset_create(&onproc, true);
    361 	KASSERT(onproc != NULL);
    362 #else
    363 	onproc = NULL;
    364 #endif
    365 	VM_PAGEMD_PVLIST_READLOCK(mdpg);
    366 	pmap_pvlist_check(mdpg);
    367 
    368 	if (pv->pv_pmap != NULL) {
    369 		for (; pv != NULL; pv = pv->pv_next) {
    370 #ifdef MULTIPROCESSOR
    371 			kcpuset_merge(onproc, pv->pv_pmap->pm_onproc);
    372 			if (kcpuset_match(onproc, kcpuset_running)) {
    373 				break;
    374 			}
    375 #else
    376 			if (pv->pv_pmap == curpmap) {
    377 				onproc = curcpu()->ci_data.cpu_kcpuset;
    378 				break;
    379 			}
    380 #endif
    381 		}
    382 	}
    383 	pmap_pvlist_check(mdpg);
    384 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
    385 	kpreempt_disable();
    386 	pmap_md_page_syncicache(pg, onproc);
    387 	kpreempt_enable();
    388 #ifdef MULTIPROCESSOR
    389 	kcpuset_destroy(onproc);
    390 #endif
    391 }
    392 
    393 /*
    394  * Define the initial bounds of the kernel virtual address space.
    395  */
    396 void
    397 pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
    398 {
    399 
    400 	*vstartp = pmap_limits.virtual_start;
    401 	*vendp = pmap_limits.virtual_end;
    402 }
    403 
    404 vaddr_t
    405 pmap_growkernel(vaddr_t maxkvaddr)
    406 {
    407 	vaddr_t virtual_end = pmap_limits.virtual_end;
    408 	maxkvaddr = pmap_round_seg(maxkvaddr) - 1;
    409 
    410 	/*
    411 	 * Reserve PTEs for the new KVA space.
    412 	 */
    413 	for (; virtual_end < maxkvaddr; virtual_end += NBSEG) {
    414 		pmap_pte_reserve(pmap_kernel(), virtual_end, 0);
    415 	}
    416 
    417 	/*
    418 	 * Don't exceed VM_MAX_KERNEL_ADDRESS!
    419 	 */
    420 	if (virtual_end == 0 || virtual_end > VM_MAX_KERNEL_ADDRESS)
    421 		virtual_end = VM_MAX_KERNEL_ADDRESS;
    422 
    423 	/*
    424 	 * Update new end.
    425 	 */
    426 	pmap_limits.virtual_end = virtual_end;
    427 	return virtual_end;
    428 }
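         /*
          * Worked example (values assumed for illustration, not taken from
          * any particular port): with NBSEG = 4MB and a request 5MB past
          * the current virtual_end, pmap_round_seg() rounds the request up
          * to the next 8MB boundary, so the loop above reserves PTE pages
          * for two new 4MB segments before virtual_end is advanced.
          */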
    429 
    430 /*
    431  * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
    432  * This function allows for early dynamic memory allocation until the virtual
    433  * memory system has been bootstrapped.  After that point, either kmem_alloc
    434  * or malloc should be used.  This function works by stealing pages from the
    435  * (to be) managed page pool, then implicitly mapping the pages (by using
    436  * their k0seg addresses) and zeroing them.
    437  *
    438  * It may be used once the physical memory segments have been pre-loaded
    439  * into the vm_physmem[] array.  Early memory allocation MUST use this
    440  * interface!  This cannot be used after vm_page_startup(), and will
    441  * generate a panic if tried.
    442  *
    443  * Note that this memory will never be freed, and in essence it is wired
    444  * down.
    445  *
    446  * We must adjust *vstartp and/or *vendp iff we use address space
    447  * from the kernel virtual address range defined by pmap_virtual_space().
    448  */
    449 vaddr_t
    450 pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
    451 {
    452 	size_t npgs;
    453 	paddr_t pa;
    454 	vaddr_t va;
    455 	struct vm_physseg *maybe_seg = NULL;
    456 	u_int maybe_bank = vm_nphysseg;
    457 
    458 	size = round_page(size);
    459 	npgs = atop(size);
    460 
    461 	aprint_debug("%s: need %zu pages\n", __func__, npgs);
    462 
    463 	for (u_int bank = 0; bank < vm_nphysseg; bank++) {
    464 		struct vm_physseg * const seg = VM_PHYSMEM_PTR(bank);
    465 		if (uvm.page_init_done == true)
    466 			panic("pmap_steal_memory: called _after_ bootstrap");
    467 
    468 		aprint_debug("%s: seg %u: %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
    469 		    __func__, bank,
    470 		    seg->avail_start, seg->start,
    471 		    seg->avail_end, seg->end);
    472 
    473 		if (seg->avail_start != seg->start
    474 		    || seg->avail_start >= seg->avail_end) {
    475 			aprint_debug("%s: seg %u: bad start\n", __func__, bank);
    476 			continue;
    477 		}
    478 
    479 		if (seg->avail_end - seg->avail_start < npgs) {
    480 			aprint_debug("%s: seg %u: too small for %zu pages\n",
    481 			    __func__, bank, npgs);
    482 			continue;
    483 		}
    484 
    485 		if (!pmap_md_ok_to_steal_p(seg, npgs)) {
    486 			continue;
    487 		}
    488 
    489 		/*
    490 		 * Always try to allocate from the segment with the least
    491 		 * amount of space left.
    492 		 */
    493 #define VM_PHYSMEM_SPACE(s)	((s)->avail_end - (s)->avail_start)
    494 		if (maybe_seg == NULL
    495 		    || VM_PHYSMEM_SPACE(seg) < VM_PHYSMEM_SPACE(maybe_seg)) {
    496 			maybe_seg = seg;
    497 			maybe_bank = bank;
    498 		}
    499 	}
    500 
    501 	if (maybe_seg) {
    502 		struct vm_physseg * const seg = maybe_seg;
    503 		u_int bank = maybe_bank;
    504 
    505 		/*
    506 		 * There are enough pages here; steal them!
    507 		 */
    508 		pa = ptoa(seg->avail_start);
    509 		seg->avail_start += npgs;
    510 		seg->start += npgs;
    511 
    512 		/*
    513 		 * Have we used up this segment?
    514 		 */
    515 		if (seg->avail_start == seg->end) {
    516 			if (vm_nphysseg == 1)
    517 				panic("pmap_steal_memory: out of memory!");
    518 
    519 			aprint_debug("%s: seg %u: %zu pages stolen (removed)\n",
    520 			    __func__, bank, npgs);
    521 			/* Remove this segment from the list. */
    522 			vm_nphysseg--;
    523 			for (u_int x = bank; x < vm_nphysseg; x++) {
    524 				/* structure copy */
    525 				VM_PHYSMEM_PTR_SWAP(x, x + 1);
    526 			}
    527 		} else {
    528 			aprint_debug("%s: seg %u: %zu pages stolen (%#"PRIxPADDR" left)\n",
    529 			    __func__, bank, npgs, VM_PHYSMEM_SPACE(seg));
    530 		}
    531 
    532 		va = pmap_md_map_poolpage(pa, size);
    533 		memset((void *)va, 0, size);
    534 		return va;
    535 	}
    536 
    537 	/*
    538 	 * If we got here, there was no memory left.
    539 	 */
    540 	panic("pmap_steal_memory: no memory to steal %zu pages", npgs);
    541 }
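         /*
          * Sketch of a typical early-boot caller (hypothetical; any MD
          * pmap_bootstrap() running before uvm.page_init_done is set):
          *
          *	vaddr_t v = pmap_steal_memory(USPACE, NULL, NULL);
          *
          * The stolen pages come back zeroed, directly mapped and wired.
          * This implementation never touches *vstartp or *vendp, though
          * MI callers normally pass real pointers.
          */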
    542 
    543 /*
    544  *	Initialize the pmap module.
    545  *	Called by vm_init, to initialize any structures that the pmap
    546  *	system needs to map virtual memory.
    547  */
    548 void
    549 pmap_init(void)
    550 {
    551 	UVMHIST_INIT_STATIC(pmapexechist, pmapexechistbuf);
    552 	UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);
    553 
    554 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    555 
    556 	/*
    557 	 * Initialize the segtab lock.
    558 	 */
    559 	mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH);
    560 
    561 	/*
    562 	 * Set a low water mark on the pv_entry pool, so that we are
    563 	 * more likely to have these around even in extreme memory
    564 	 * starvation.
    565 	 */
    566 	pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);
    567 
    568 	/*
    569 	 * Set the page colormask but allow pmap_md_init to override it.
    570 	 */
    571 	pmap_page_colormask = ptoa(uvmexp.colormask);
    572 
    573 	pmap_md_init();
    574 
    575 	/*
    576 	 * Now it is safe to enable pv entry recording.
    577 	 */
    578 	pmap_initialized = true;
    579 }
    580 
    581 /*
    582  *	Create and return a physical map.
     583  *
     584  *	The new map is a real hardware map: it is
     585  *	created with a reference count of one and an
     586  *	empty segment table, and it covers the user
     587  *	address range from VM_MIN_ADDRESS to
     588  *	VM_MAXUSER_ADDRESS.
    592  */
    593 pmap_t
    594 pmap_create(void)
    595 {
    596 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    597 	PMAP_COUNT(create);
    598 
    599 	pmap_t pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
    600 	memset(pmap, 0, PMAP_SIZE);
    601 
    602 	KASSERT(pmap->pm_pai[0].pai_link.le_prev == NULL);
    603 
    604 	pmap->pm_count = 1;
    605 	pmap->pm_minaddr = VM_MIN_ADDRESS;
    606 	pmap->pm_maxaddr = VM_MAXUSER_ADDRESS;
    607 
    608 	pmap_segtab_init(pmap);
    609 
    610 #ifdef MULTIPROCESSOR
    611 	kcpuset_create(&pmap->pm_active, true);
    612 	kcpuset_create(&pmap->pm_onproc, true);
    613 	KASSERT(pmap->pm_active != NULL);
    614 	KASSERT(pmap->pm_onproc != NULL);
    615 #endif
    616 
    617 	UVMHIST_LOG(pmaphist, " <-- done (pmap=%p)", pmap, 0, 0, 0);
    618 
    619 	return pmap;
    620 }
    621 
    622 /*
    623  *	Retire the given physical map from service.
    624  *	Should only be called if the map contains
    625  *	no valid mappings.
    626  */
    627 void
    628 pmap_destroy(pmap_t pmap)
    629 {
    630 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    631 	UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0, 0, 0);
    632 
    633 	if (atomic_dec_uint_nv(&pmap->pm_count) > 0) {
    634 		PMAP_COUNT(dereference);
    635 		UVMHIST_LOG(pmaphist, " <-- done (deref)", 0, 0, 0, 0);
    636 		return;
    637 	}
    638 
    639 	PMAP_COUNT(destroy);
    640 	KASSERT(pmap->pm_count == 0);
    641 	kpreempt_disable();
    642 	pmap_md_tlb_miss_lock_enter();
    643 	pmap_tlb_asid_release_all(pmap);
    644 	pmap_segtab_destroy(pmap, NULL, 0);
    645 	pmap_md_tlb_miss_lock_exit();
    646 
    647 #ifdef MULTIPROCESSOR
    648 	kcpuset_destroy(pmap->pm_active);
    649 	kcpuset_destroy(pmap->pm_onproc);
    650 	pmap->pm_active = NULL;
    651 	pmap->pm_onproc = NULL;
    652 #endif
    653 
    654 	pool_put(&pmap_pmap_pool, pmap);
    655 	kpreempt_enable();
    656 
    657 	UVMHIST_LOG(pmaphist, " <-- done (freed)", 0, 0, 0, 0);
    658 }
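         /*
          * Reference-count sketch: every pmap_reference() must be balanced
          * by a pmap_destroy(); only the final call tears the pmap down.
          *
          *	pmap_t pm = pmap_create();	pm_count == 1
          *	pmap_reference(pm);		pm_count == 2
          *	pmap_destroy(pm);		pm_count == 1, nothing freed
          *	pmap_destroy(pm);		pm_count == 0, ASIDs and
          *					segtab released
          */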
    659 
    660 /*
    661  *	Add a reference to the specified pmap.
    662  */
    663 void
    664 pmap_reference(pmap_t pmap)
    665 {
    666 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    667 	UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0, 0, 0);
    668 	PMAP_COUNT(reference);
    669 
    670 	if (pmap != NULL) {
    671 		atomic_inc_uint(&pmap->pm_count);
    672 	}
    673 
    674 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
    675 }
    676 
    677 /*
    678  *	Make a new pmap (vmspace) active for the given process.
    679  */
    680 void
    681 pmap_activate(struct lwp *l)
    682 {
    683 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
    684 
    685 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    686 	UVMHIST_LOG(pmaphist, "(l=%p (pmap=%p))", l, pmap, 0, 0);
    687 	PMAP_COUNT(activate);
    688 
    689 	kpreempt_disable();
    690 	pmap_md_tlb_miss_lock_enter();
    691 	pmap_tlb_asid_acquire(pmap, l);
    692 	if (l == curlwp) {
    693 		pmap_segtab_activate(pmap, l);
    694 	}
    695 	pmap_md_tlb_miss_lock_exit();
    696 	kpreempt_enable();
    697 
    698 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
    699 }
    700 
    701 /*
    702  * Remove this page from all physical maps in which it resides.
    703  * Reflects back modify bits to the pager.
    704  */
    705 void
    706 pmap_page_remove(struct vm_page *pg)
    707 {
    708 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
    709 
    710 	kpreempt_disable();
    711 	VM_PAGEMD_PVLIST_LOCK(mdpg);
    712 	pmap_pvlist_check(mdpg);
    713 
    714 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    715 
    716 	pv_entry_t pv = &mdpg->mdpg_first;
    717 	if (pv->pv_pmap == NULL) {
    718 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
    719 		kpreempt_enable();
    720 		UVMHIST_LOG(pmaphist, " <-- done (empty)", 0, 0, 0, 0);
    721 		return;
    722 	}
    723 
    724 	pv_entry_t npv;
    725 	pv_entry_t pvp = NULL;
    726 
    727 	for (; pv != NULL; pv = npv) {
    728 		npv = pv->pv_next;
    729 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
    730 		if (pv->pv_va & PV_KENTER) {
    731 			UVMHIST_LOG(pmaphist, " pv %p pmap %p va %"
    732 			    PRIxVADDR" skip", pv, pv->pv_pmap, pv->pv_va, 0);
    733 
    734 			KASSERT(pv->pv_pmap == pmap_kernel());
    735 
     736 			/* Assume this is the last one; fixed up on a later iteration if more follow */
    737 			pv->pv_next = NULL;
    738 
    739 			/*
    740 			 * pvp is non-null when we already have a PV_KENTER
    741 			 * pv in pvh_first; otherwise we haven't seen a
    742 			 * PV_KENTER pv and we need to copy this one to
    743 			 * pvh_first
    744 			 */
    745 			if (pvp) {
    746 				/*
    747 				 * The previous PV_KENTER pv needs to point to
    748 				 * this PV_KENTER pv
    749 				 */
    750 				pvp->pv_next = pv;
    751 			} else {
    752 				pv_entry_t fpv = &mdpg->mdpg_first;
    753 				*fpv = *pv;
    754 				KASSERT(fpv->pv_pmap == pmap_kernel());
    755 			}
    756 			pvp = pv;
    757 			continue;
    758 		}
    759 #endif
    760 		const pmap_t pmap = pv->pv_pmap;
    761 		vaddr_t va = trunc_page(pv->pv_va);
    762 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
    763 		KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
    764 		    pmap_limits.virtual_end);
    765 		pt_entry_t pte = *ptep;
    766 		UVMHIST_LOG(pmaphist, " pv %p pmap %p va %"PRIxVADDR
    767 		    " pte %#"PRIxPTE, pv, pmap, va, pte_value(pte));
    768 		if (!pte_valid_p(pte))
    769 			continue;
    770 		const bool is_kernel_pmap_p = (pmap == pmap_kernel());
    771 		if (is_kernel_pmap_p) {
    772 			PMAP_COUNT(remove_kernel_pages);
    773 		} else {
    774 			PMAP_COUNT(remove_user_pages);
    775 		}
    776 		if (pte_wired_p(pte))
    777 			pmap->pm_stats.wired_count--;
    778 		pmap->pm_stats.resident_count--;
    779 
    780 		pmap_md_tlb_miss_lock_enter();
    781 		const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
    782 		*ptep = npte;
    783 		/*
    784 		 * Flush the TLB for the given address.
    785 		 */
    786 		pmap_tlb_invalidate_addr(pmap, va);
    787 		pmap_md_tlb_miss_lock_exit();
    788 
    789 		/*
     790 		 * A non-NULL pvp means this pv is not the embedded
     791 		 * pvh_first entry, so free it back to the pv pool.
    792 		 */
    793 		if (pvp) {
    794 			KASSERT(pvp->pv_pmap == pmap_kernel());
    795 			KASSERT(pvp->pv_next == NULL);
    796 			pmap_pv_free(pv);
    797 		} else {
    798 			pv->pv_pmap = NULL;
    799 			pv->pv_next = NULL;
    800 		}
    801 	}
    802 
    803 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
    804 	pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
    805 #endif
    806 	pmap_pvlist_check(mdpg);
    807 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
    808 	kpreempt_enable();
    809 
    810 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
    811 }
    812 
    813 
    814 /*
    815  *	Make a previously active pmap (vmspace) inactive.
    816  */
    817 void
    818 pmap_deactivate(struct lwp *l)
    819 {
    820 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
    821 
    822 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    823 	UVMHIST_LOG(pmaphist, "(l=%p (pmap=%p))", l, pmap, 0, 0);
    824 	PMAP_COUNT(deactivate);
    825 
    826 	kpreempt_disable();
    827 	KASSERT(l == curlwp || l->l_cpu == curlwp->l_cpu);
    828 	pmap_md_tlb_miss_lock_enter();
    829 	curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
    830 #ifdef _LP64
    831 	curcpu()->ci_pmap_user_seg0tab = NULL;
    832 #endif
    833 	pmap_tlb_asid_deactivate(pmap);
    834 	pmap_md_tlb_miss_lock_exit();
    835 	kpreempt_enable();
    836 
    837 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
    838 }
    839 
    840 void
    841 pmap_update(struct pmap *pmap)
    842 {
    843 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    844 	UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0, 0, 0);
    845 	PMAP_COUNT(update);
    846 
    847 	kpreempt_disable();
    848 #if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
    849 	u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0);
    850 	if (pending && pmap_tlb_shootdown_bystanders(pmap))
    851 		PMAP_COUNT(shootdown_ipis);
    852 #endif
    853 	pmap_md_tlb_miss_lock_enter();
    854 #if defined(DEBUG) && !defined(MULTIPROCESSOR)
    855 	pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
    856 #endif /* DEBUG */
    857 
    858 	/*
    859 	 * If pmap_remove_all was called, we deactivated ourselves and nuked
    860 	 * our ASID.  Now we have to reactivate ourselves.
    861 	 */
    862 	if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) {
    863 		pmap->pm_flags ^= PMAP_DEFERRED_ACTIVATE;
    864 		pmap_tlb_asid_acquire(pmap, curlwp);
    865 		pmap_segtab_activate(pmap, curlwp);
    866 	}
    867 	pmap_md_tlb_miss_lock_exit();
    868 	kpreempt_enable();
    869 
    870 	UVMHIST_LOG(pmaphist, " <-- done%s",
    871 	    (pmap == pmap_kernel()) ? " (kernel)" : "", 0, 0, 0);
    872 }
    873 
    874 /*
    875  *	Remove the given range of addresses from the specified map.
    876  *
    877  *	It is assumed that the start and end are properly
    878  *	rounded to the page size.
    879  */
    880 
    881 static bool
    882 pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
    883 	uintptr_t flags)
    884 {
    885 	const pt_entry_t npte = flags;
    886 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
    887 
    888 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    889 	UVMHIST_LOG(pmaphist, "(pmap=%p %sva=%#"PRIxVADDR"..%#"PRIxVADDR,
    890 	    pmap, (is_kernel_pmap_p ? "(kernel) " : ""), sva, eva);
    891 	UVMHIST_LOG(pmaphist, "ptep=%p, flags(npte)=%#"PRIxPTR")",
    892 	    ptep, flags, 0, 0);
    893 
    894 	KASSERT(kpreempt_disabled());
    895 
    896 	for (; sva < eva; sva += NBPG, ptep++) {
    897 		const pt_entry_t pte = *ptep;
    898 		if (!pte_valid_p(pte))
    899 			continue;
    900 		if (is_kernel_pmap_p) {
    901 			PMAP_COUNT(remove_kernel_pages);
    902 		} else {
    903 			PMAP_COUNT(remove_user_pages);
    904 		}
    905 		if (pte_wired_p(pte))
    906 			pmap->pm_stats.wired_count--;
    907 		pmap->pm_stats.resident_count--;
    908 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
    909 		if (__predict_true(pg != NULL)) {
    910 			pmap_remove_pv(pmap, sva, pg, pte_modified_p(pte));
    911 		}
    912 		pmap_md_tlb_miss_lock_enter();
    913 		*ptep = npte;
    914 		/*
    915 		 * Flush the TLB for the given address.
    916 		 */
    917 		pmap_tlb_invalidate_addr(pmap, sva);
    918 		pmap_md_tlb_miss_lock_exit();
    919 	}
    920 
    921 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
    922 
    923 	return false;
    924 }
    925 
    926 void
    927 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
    928 {
    929 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
    930 	const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
    931 
    932 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    933 	UVMHIST_LOG(pmaphist, "(pmap=%p, va=%#"PRIxVADDR"..%#"PRIxVADDR")",
    934 	    pmap, sva, eva, 0);
    935 
    936 	if (is_kernel_pmap_p) {
    937 		PMAP_COUNT(remove_kernel_calls);
    938 	} else {
    939 		PMAP_COUNT(remove_user_calls);
    940 	}
    941 #ifdef PMAP_FAULTINFO
    942 	curpcb->pcb_faultinfo.pfi_faultaddr = 0;
    943 	curpcb->pcb_faultinfo.pfi_repeats = 0;
    944 	curpcb->pcb_faultinfo.pfi_faultpte = NULL;
    945 #endif
    946 	kpreempt_disable();
    947 	pmap_addr_range_check(pmap, sva, eva, __func__);
    948 	pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte);
    949 	kpreempt_enable();
    950 
    951 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
    952 }
    953 
    954 /*
    955  *	pmap_page_protect:
    956  *
    957  *	Lower the permission for all mappings to a given page.
    958  */
    959 void
    960 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
    961 {
    962 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
    963 	pv_entry_t pv;
    964 	vaddr_t va;
    965 
    966 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    967 	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR") prot=%#x)",
    968 	    pg, VM_PAGE_TO_PHYS(pg), prot, 0);
    969 	PMAP_COUNT(page_protect);
    970 
    971 	switch (prot) {
    972 	case VM_PROT_READ|VM_PROT_WRITE:
    973 	case VM_PROT_ALL:
    974 		break;
    975 
    976 	/* copy_on_write */
    977 	case VM_PROT_READ:
    978 	case VM_PROT_READ|VM_PROT_EXECUTE:
    979 		pv = &mdpg->mdpg_first;
    980 		kpreempt_disable();
    981 		VM_PAGEMD_PVLIST_READLOCK(mdpg);
    982 		pmap_pvlist_check(mdpg);
    983 		/*
     984 		 * Loop over all current mappings, setting/clearing as appropriate.
    985 		 */
    986 		if (pv->pv_pmap != NULL) {
    987 			while (pv != NULL) {
    988 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
    989 				if (pv->pv_va & PV_KENTER) {
    990 					pv = pv->pv_next;
    991 					continue;
    992 				}
    993 #endif
    994 				const pmap_t pmap = pv->pv_pmap;
    995 				va = trunc_page(pv->pv_va);
    996 				const uintptr_t gen =
    997 				    VM_PAGEMD_PVLIST_UNLOCK(mdpg);
    998 				pmap_protect(pmap, va, va + PAGE_SIZE, prot);
    999 				KASSERT(pv->pv_pmap == pmap);
   1000 				pmap_update(pmap);
   1001 				if (gen != VM_PAGEMD_PVLIST_READLOCK(mdpg)) {
   1002 					pv = &mdpg->mdpg_first;
   1003 				} else {
   1004 					pv = pv->pv_next;
   1005 				}
   1006 				pmap_pvlist_check(mdpg);
   1007 			}
   1008 		}
   1009 		pmap_pvlist_check(mdpg);
   1010 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1011 		kpreempt_enable();
   1012 		break;
   1013 
   1014 	/* remove_all */
   1015 	default:
   1016 		pmap_page_remove(pg);
   1017 	}
   1018 
   1019 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1020 }
   1021 
   1022 static bool
   1023 pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
   1024 	uintptr_t flags)
   1025 {
   1026 	const vm_prot_t prot = (flags & VM_PROT_ALL);
   1027 
   1028 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1029 	UVMHIST_LOG(pmaphist, "(pmap=%p %sva=%#"PRIxVADDR"..%#"PRIxVADDR,
   1030 	    pmap, (pmap == pmap_kernel() ? "(kernel) " : ""), sva, eva);
   1031 	UVMHIST_LOG(pmaphist, "ptep=%p, flags(npte)=%#"PRIxPTR")",
   1032 	    ptep, flags, 0, 0);
   1033 
   1034 	KASSERT(kpreempt_disabled());
   1035 	/*
   1036 	 * Change protection on every valid mapping within this segment.
   1037 	 */
   1038 	for (; sva < eva; sva += NBPG, ptep++) {
   1039 		pt_entry_t pte = *ptep;
   1040 		if (!pte_valid_p(pte))
   1041 			continue;
   1042 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
   1043 		if (pg != NULL && pte_modified_p(pte)) {
   1044 			struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1045 			if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
   1046 				KASSERT(mdpg->mdpg_first.pv_pmap != NULL);
   1047 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1048 				if (VM_PAGEMD_CACHED_P(mdpg)) {
   1049 #endif
   1050 					UVMHIST_LOG(pmapexechist,
   1051 					    "pg %p (pa %#"PRIxPADDR"): %s",
   1052 					    pg, VM_PAGE_TO_PHYS(pg),
   1053 					    "syncicached performed", 0);
   1054 					pmap_page_syncicache(pg);
   1055 					PMAP_COUNT(exec_synced_protect);
   1056 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1057 				}
   1058 #endif
   1059 			}
   1060 		}
   1061 		pte = pte_prot_downgrade(pte, prot);
   1062 		if (*ptep != pte) {
   1063 			pmap_md_tlb_miss_lock_enter();
   1064 			*ptep = pte;
   1065 			/*
   1066 			 * Update the TLB if needed.
   1067 			 */
   1068 			pmap_tlb_update_addr(pmap, sva, pte, PMAP_TLB_NEED_IPI);
   1069 			pmap_md_tlb_miss_lock_exit();
   1070 		}
   1071 	}
   1072 
   1073 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1074 
   1075 	return false;
   1076 }
   1077 
   1078 /*
   1079  *	Set the physical protection on the
   1080  *	specified range of this map as requested.
   1081  */
   1082 void
   1083 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
   1084 {
   1085 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1086 	UVMHIST_LOG(pmaphist,
   1087 	    "(pmap=%p, va=%#"PRIxVADDR"..%#"PRIxVADDR", prot=%u)",
   1088 	    pmap, sva, eva, prot);
   1089 	PMAP_COUNT(protect);
   1090 
   1091 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
   1092 		pmap_remove(pmap, sva, eva);
   1093 		UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1094 		return;
   1095 	}
   1096 
   1097 	/*
   1098 	 * Change protection on every valid mapping within this segment.
   1099 	 */
   1100 	kpreempt_disable();
   1101 	pmap_addr_range_check(pmap, sva, eva, __func__);
   1102 	pmap_pte_process(pmap, sva, eva, pmap_pte_protect, prot);
   1103 	kpreempt_enable();
   1104 
   1105 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1106 }
   1107 
   1108 #if defined(PMAP_VIRTUAL_CACHE_ALIASES) && !defined(PMAP_NO_PV_UNCACHED)
   1109 /*
   1110  *	pmap_page_cache:
   1111  *
   1112  *	Change all mappings of a managed page to cached/uncached.
   1113  */
   1114 void
   1115 pmap_page_cache(struct vm_page *pg, bool cached)
   1116 {
   1117 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1118 
   1119 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1120 	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR") cached=%s)",
   1121 	    pg, VM_PAGE_TO_PHYS(pg), cached ? "true" : "false", 0);
   1122 
   1123 	KASSERT(kpreempt_disabled());
   1124 	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
   1125 
   1126 	if (cached) {
   1127 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
   1128 		PMAP_COUNT(page_cache_restorations);
   1129 	} else {
   1130 		pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED);
   1131 		PMAP_COUNT(page_cache_evictions);
   1132 	}
   1133 
   1134 	for (pv_entry_t pv = &mdpg->mdpg_first; pv != NULL; pv = pv->pv_next) {
   1135 		pmap_t pmap = pv->pv_pmap;
   1136 		vaddr_t va = trunc_page(pv->pv_va);
   1137 
   1138 		KASSERT(pmap != NULL);
   1139 		KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
   1140 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1141 		if (ptep == NULL)
   1142 			continue;
   1143 		pt_entry_t pte = *ptep;
   1144 		if (pte_valid_p(pte)) {
   1145 			pte = pte_cached_change(pte, cached);
   1146 			pmap_md_tlb_miss_lock_enter();
   1147 			*ptep = pte;
   1148 			pmap_tlb_update_addr(pmap, va, pte, PMAP_TLB_NEED_IPI);
   1149 			pmap_md_tlb_miss_lock_exit();
   1150 		}
   1151 	}
   1152 
   1153 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1154 }
   1155 #endif	/* PMAP_VIRTUAL_CACHE_ALIASES && !PMAP_NO_PV_UNCACHED */
   1156 
   1157 /*
   1158  *	Insert the given physical page (p) at
   1159  *	the specified virtual address (v) in the
   1160  *	target physical map with the protection requested.
   1161  *
   1162  *	If specified, the page will be wired down, meaning
   1163  *	that the related pte can not be reclaimed.
   1164  *
   1165  *	NB:  This is the only routine which MAY NOT lazy-evaluate
   1166  *	or lose information.  That is, this routine must actually
   1167  *	insert this page into the given map NOW.
   1168  */
   1169 int
   1170 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1171 {
   1172 	const bool wired = (flags & PMAP_WIRED) != 0;
   1173 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
   1174 	u_int update_flags = (flags & VM_PROT_ALL) != 0 ? PMAP_TLB_INSERT : 0;
   1175 #ifdef UVMHIST
   1176 	struct kern_history * const histp =
   1177 	    ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist);
   1178 #endif
   1179 
   1180 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(*histp);
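         	/*
         	 * VM_PROT_STRING picks a printable suffix out of a packed
         	 * table of 6-byte NUL-padded strings: UVM_PROTECTION(prot)
         	 * yields the 0..7 R/W/X bit combination and *6 is the stride
         	 * into the table ("(R)", "(RW)", ..., "(RWX)").
         	 */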
   1181 #define VM_PROT_STRING(prot) \
   1182 	&"\0     " \
   1183 	 "(R)\0  " \
   1184 	 "(W)\0  " \
   1185 	 "(RW)\0 " \
   1186 	 "(X)\0  " \
   1187 	 "(RX)\0 " \
   1188 	 "(WX)\0 " \
   1189 	 "(RWX)\0"[UVM_PROTECTION(prot)*6]
   1190 	UVMHIST_LOG(*histp, "(pmap=%p, va=%#"PRIxVADDR", pa=%#"PRIxPADDR,
   1191 	    pmap, va, pa, 0);
   1192 	UVMHIST_LOG(*histp, "prot=%#x%s flags=%#x%s)",
   1193 	    prot, VM_PROT_STRING(prot), flags, VM_PROT_STRING(flags));
   1194 
   1195 	const bool good_color = PMAP_PAGE_COLOROK_P(pa, va);
   1196 	if (is_kernel_pmap_p) {
   1197 		PMAP_COUNT(kernel_mappings);
   1198 		if (!good_color)
   1199 			PMAP_COUNT(kernel_mappings_bad);
   1200 	} else {
   1201 		PMAP_COUNT(user_mappings);
   1202 		if (!good_color)
   1203 			PMAP_COUNT(user_mappings_bad);
   1204 	}
   1205 	pmap_addr_range_check(pmap, va, va, __func__);
   1206 
   1207 	KASSERTMSG(prot & VM_PROT_READ, "no READ (%#x) in prot %#x",
   1208 	    VM_PROT_READ, prot);
   1209 
   1210 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1211 	struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
   1212 
   1213 	if (pg) {
   1214 		/* Set page referenced/modified status based on flags */
   1215 		if (flags & VM_PROT_WRITE) {
   1216 			pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
   1217 		} else if (flags & VM_PROT_ALL) {
   1218 			pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
   1219 		}
   1220 
   1221 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1222 		if (!VM_PAGEMD_CACHED_P(mdpg)) {
   1223 			flags |= PMAP_NOCACHE;
   1224 			PMAP_COUNT(uncached_mappings);
   1225 		}
   1226 #endif
   1227 
   1228 		PMAP_COUNT(managed_mappings);
   1229 	} else {
   1230 		/*
   1231 		 * Assumption: if it is not part of our managed memory
   1232 		 * then it must be device memory which may be volatile.
   1233 		 */
   1234 		if ((flags & PMAP_CACHE_MASK) == 0)
   1235 			flags |= PMAP_NOCACHE;
   1236 		PMAP_COUNT(unmanaged_mappings);
   1237 	}
   1238 
   1239 	pt_entry_t npte = pte_make_enter(pa, mdpg, prot, flags,
   1240 	    is_kernel_pmap_p);
   1241 
   1242 	kpreempt_disable();
   1243 
   1244 	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags);
   1245 	if (__predict_false(ptep == NULL)) {
   1246 		kpreempt_enable();
   1247 		UVMHIST_LOG(*histp, " <-- ENOMEM", 0, 0, 0, 0);
   1248 		return ENOMEM;
   1249 	}
   1250 	const pt_entry_t opte = *ptep;
   1251 
   1252 	/* Done after case that may sleep/return. */
   1253 	if (pg)
   1254 		pmap_enter_pv(pmap, va, pg, &npte, 0);
   1255 
   1256 	/*
   1257 	 * Now validate mapping with desired protection/wiring.
   1258 	 * Assume uniform modified and referenced status for all
   1259 	 * MIPS pages in a MACH page.
   1260 	 */
   1261 	if (wired) {
   1262 		pmap->pm_stats.wired_count++;
   1263 		npte = pte_wire_entry(npte);
   1264 	}
   1265 
   1266 	UVMHIST_LOG(*histp, "new pte %#"PRIxPTE" (pa %#"PRIxPADDR")",
   1267 	    pte_value(npte), pa, 0, 0);
   1268 
   1269 	if (pte_valid_p(opte) && pte_to_paddr(opte) != pa) {
   1270 		pmap_remove(pmap, va, va + NBPG);
   1271 		PMAP_COUNT(user_mappings_changed);
   1272 	}
   1273 
   1274 	KASSERT(pte_valid_p(npte));
   1275 	const bool resident = pte_valid_p(opte);
   1276 	if (resident) {
   1277 		update_flags |= PMAP_TLB_NEED_IPI;
   1278 	} else {
   1279 		pmap->pm_stats.resident_count++;
   1280 	}
   1281 
   1282 	pmap_md_tlb_miss_lock_enter();
   1283 	*ptep = npte;
   1284 	pmap_tlb_update_addr(pmap, va, npte, update_flags);
   1285 	pmap_md_tlb_miss_lock_exit();
   1286 	kpreempt_enable();
   1287 
   1288 	if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) {
   1289 		KASSERT(mdpg != NULL);
   1290 		PMAP_COUNT(exec_mappings);
   1291 		if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) {
   1292 			if (!pte_deferred_exec_p(npte)) {
   1293 				UVMHIST_LOG(*histp,
   1294 				    "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
   1295 				    va, pg, "immediate", "");
   1296 				pmap_page_syncicache(pg);
   1297 				pmap_page_set_attributes(mdpg,
   1298 				    VM_PAGEMD_EXECPAGE);
   1299 				PMAP_COUNT(exec_synced_mappings);
   1300 			} else {
   1301 				UVMHIST_LOG(*histp, "va=%#"PRIxVADDR
   1302 				    " pg %p: %s syncicache: pte %#x",
   1303 				    va, pg, "defer", npte);
   1304 			}
   1305 		} else {
   1306 			UVMHIST_LOG(*histp,
   1307 			    "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
   1308 			    va, pg, "no",
   1309 			    (pte_cached_p(npte)
   1310 				? " (already exec)"
   1311 				: " (uncached)"));
   1312 		}
   1313 	} else if (pg != NULL && (prot & VM_PROT_EXECUTE)) {
   1314 		KASSERT(mdpg != NULL);
   1315 		KASSERT(prot & VM_PROT_WRITE);
   1316 		PMAP_COUNT(exec_mappings);
   1317 		pmap_page_syncicache(pg);
   1318 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1319 		UVMHIST_LOG(*histp,
   1320 		    "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
   1321 		    va, pg, "immediate", " (writeable)");
   1322 	}
   1323 
   1324 	UVMHIST_LOG(*histp, " <-- 0 (OK)", 0, 0, 0, 0);
   1325 	return 0;
   1326 }
   1327 
   1328 void
   1329 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1330 {
   1331 	pmap_t pmap = pmap_kernel();
   1332 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1333 	struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
   1334 
   1335 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1336 	UVMHIST_LOG(pmaphist,
   1337 	    "(va=%#"PRIxVADDR", pa=%#"PRIxPADDR", prot=%u, flags=%#x)",
   1338 	    va, pa, prot, flags);
   1339 	PMAP_COUNT(kenter_pa);
   1340 
   1341 	if (mdpg == NULL) {
   1342 		PMAP_COUNT(kenter_pa_unmanaged);
   1343 		if ((flags & PMAP_CACHE_MASK) == 0)
   1344 			flags |= PMAP_NOCACHE;
   1345 	} else {
   1346 		if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va))
   1347 			PMAP_COUNT(kenter_pa_bad);
   1348 	}
   1349 
   1350 	pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags);
   1351 	kpreempt_disable();
   1352 	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1353 	KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
   1354 	    pmap_limits.virtual_end);
   1355 	KASSERT(!pte_valid_p(*ptep));
   1356 
   1357 	/*
    1358 	 * No need to track unmanaged pages or PMAP_KMPAGE pages for aliases.
   1359 	 */
   1360 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1361 	if (pg != NULL && (flags & PMAP_KMPAGE) == 0) {
   1362 		pmap_enter_pv(pmap, va, pg, &npte, PV_KENTER);
   1363 	}
   1364 #endif
   1365 
   1366 	/*
   1367 	 * We have the option to force this mapping into the TLB but we
   1368 	 * don't.  Instead let the next reference to the page do it.
   1369 	 */
   1370 	pmap_md_tlb_miss_lock_enter();
   1371 	*ptep = npte;
   1372 	pmap_tlb_update_addr(pmap_kernel(), va, npte, 0);
   1373 	pmap_md_tlb_miss_lock_exit();
   1374 	kpreempt_enable();
   1375 #if DEBUG > 1
   1376 	for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) {
   1377 		if (((long *)va)[i] != ((long *)pa)[i])
   1378 			panic("%s: contents (%lx) of va %#"PRIxVADDR
   1379 			    " != contents (%lx) of pa %#"PRIxPADDR, __func__,
   1380 			    ((long *)va)[i], va, ((long *)pa)[i], pa);
   1381 	}
   1382 #endif
   1383 
   1384 	UVMHIST_LOG(pmaphist, " <-- done (ptep=%p)", ptep, 0, 0, 0);
   1385 }
   1386 
   1387 /*
   1388  *	Remove the given range of addresses from the kernel map.
   1389  *
   1390  *	It is assumed that the start and end are properly
   1391  *	rounded to the page size.
   1392  */
   1393 
   1394 static bool
   1395 pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
   1396 	uintptr_t flags)
   1397 {
   1398 	const pt_entry_t new_pte = pte_nv_entry(true);
   1399 
   1400 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1401 	UVMHIST_LOG(pmaphist,
   1402 	    "(pmap=%p, sva=%#"PRIxVADDR", eva=%#"PRIxVADDR", ptep=%p)",
   1403 	    pmap, sva, eva, ptep);
   1404 
   1405 	KASSERT(kpreempt_disabled());
   1406 
   1407 	for (; sva < eva; sva += NBPG, ptep++) {
   1408 		pt_entry_t pte = *ptep;
   1409 		if (!pte_valid_p(pte))
   1410 			continue;
   1411 
   1412 		PMAP_COUNT(kremove_pages);
   1413 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
   1414 		if (pg != NULL) {
   1415 			pmap_remove_pv(pmap, sva, pg, !pte_readonly_p(pte));
   1416 		}
   1417 
   1418 		pmap_md_tlb_miss_lock_enter();
   1419 		*ptep = new_pte;
   1420 		pmap_tlb_invalidate_addr(pmap, sva);
   1421 		pmap_md_tlb_miss_lock_exit();
   1422 	}
   1423 
   1424 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1425 
   1426 	return false;
   1427 }
   1428 
   1429 void
   1430 pmap_kremove(vaddr_t va, vsize_t len)
   1431 {
   1432 	const vaddr_t sva = trunc_page(va);
   1433 	const vaddr_t eva = round_page(va + len);
   1434 
   1435 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1436 	UVMHIST_LOG(pmaphist, "(va=%#"PRIxVADDR", len=%#"PRIxVSIZE")",
   1437 	    va, len, 0, 0);
   1438 
   1439 	kpreempt_disable();
   1440 	pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0);
   1441 	kpreempt_enable();
   1442 
   1443 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1444 }
   1445 
   1446 void
   1447 pmap_remove_all(struct pmap *pmap)
   1448 {
   1449 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1450 	UVMHIST_LOG(pmaphist, "(pm=%p)", pmap, 0, 0, 0);
   1451 
   1452 	KASSERT(pmap != pmap_kernel());
   1453 
   1454 	kpreempt_disable();
   1455 	/*
   1456 	 * Free all of our ASIDs which means we can skip doing all the
   1457 	 * tlb_invalidate_addrs().
   1458 	 */
   1459 	pmap_md_tlb_miss_lock_enter();
   1460 #ifdef MULTIPROCESSOR
   1461 	// This should be the last CPU with this pmap onproc
   1462 	KASSERT(!kcpuset_isotherset(pmap->pm_onproc, cpu_index(curcpu())));
   1463 	if (kcpuset_isset(pmap->pm_onproc, cpu_index(curcpu())))
   1464 #endif
   1465 		pmap_tlb_asid_deactivate(pmap);
   1466 #ifdef MULTIPROCESSOR
   1467 	KASSERT(kcpuset_iszero(pmap->pm_onproc));
   1468 #endif
   1469 	pmap_tlb_asid_release_all(pmap);
   1470 	pmap_md_tlb_miss_lock_exit();
   1471 	pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE;
   1472 
   1473 #ifdef PMAP_FAULTINFO
   1474 	curpcb->pcb_faultinfo.pfi_faultaddr = 0;
   1475 	curpcb->pcb_faultinfo.pfi_repeats = 0;
   1476 	curpcb->pcb_faultinfo.pfi_faultpte = NULL;
   1477 #endif
   1478 	kpreempt_enable();
   1479 
   1480 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1481 }
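         /*
          * Note that pmap_remove_all() leaves PMAP_DEFERRED_ACTIVATE set:
          * the next pmap_update() on this pmap reacquires an ASID and
          * reactivates the segtab (see pmap_update() above).
          */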
   1482 
   1483 /*
   1484  *	Routine:	pmap_unwire
   1485  *	Function:	Clear the wired attribute for a map/virtual-address
   1486  *			pair.
   1487  *	In/out conditions:
   1488  *			The mapping must already exist in the pmap.
   1489  */
   1490 void
   1491 pmap_unwire(pmap_t pmap, vaddr_t va)
   1492 {
   1493 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1494 	UVMHIST_LOG(pmaphist, "(pmap=%p, va=%#"PRIxVADDR")", pmap, va, 0, 0);
   1495 	PMAP_COUNT(unwire);
   1496 
   1497 	/*
   1498 	 * Don't need to flush the TLB since PG_WIRED is only in software.
   1499 	 */
   1500 	kpreempt_disable();
   1501 	pmap_addr_range_check(pmap, va, va, __func__);
   1502 	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1503 	KASSERTMSG(ptep != NULL, "pmap %p va %#"PRIxVADDR" invalid STE",
   1504 	    pmap, va);
   1505 	pt_entry_t pte = *ptep;
   1506 	KASSERTMSG(pte_valid_p(pte),
   1507 	    "pmap %p va %#"PRIxVADDR" invalid PTE %#"PRIxPTE" @ %p",
   1508 	    pmap, va, pte_value(pte), ptep);
   1509 
   1510 	if (pte_wired_p(pte)) {
   1511 		pmap_md_tlb_miss_lock_enter();
   1512 		*ptep = pte_unwire_entry(pte);
   1513 		pmap_md_tlb_miss_lock_exit();
   1514 		pmap->pm_stats.wired_count--;
   1515 	}
   1516 #ifdef DIAGNOSTIC
   1517 	else {
   1518 		printf("%s: wiring for pmap %p va %#"PRIxVADDR" unchanged!\n",
   1519 		    __func__, pmap, va);
   1520 	}
   1521 #endif
   1522 	kpreempt_enable();
   1523 
   1524 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1525 }
   1526 
   1527 /*
   1528  *	Routine:	pmap_extract
   1529  *	Function:
   1530  *		Extract the physical page address associated
   1531  *		with the given map/virtual_address pair.
   1532  */
   1533 bool
   1534 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
   1535 {
   1536 	paddr_t pa;
   1537 
   1538 	if (pmap == pmap_kernel()) {
   1539 		if (pmap_md_direct_mapped_vaddr_p(va)) {
   1540 			pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   1541 			goto done;
   1542 		}
   1543 		if (pmap_md_io_vaddr_p(va))
   1544 			panic("pmap_extract: io address %#"PRIxVADDR"", va);
   1545 
   1546 		if (va >= pmap_limits.virtual_end)
   1547 			panic("%s: illegal kernel mapped address %#"PRIxVADDR,
   1548 			    __func__, va);
   1549 	}
   1550 	kpreempt_disable();
   1551 	const pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1552 	if (ptep == NULL || !pte_valid_p(*ptep)) {
   1553 		kpreempt_enable();
   1554 		return false;
   1555 	}
   1556 	pa = pte_to_paddr(*ptep) | (va & PGOFSET);
   1557 	kpreempt_enable();
   1558 done:
   1559 	if (pap != NULL) {
   1560 		*pap = pa;
   1561 	}
   1562 	return true;
   1563 }
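         /*
          * Sketch of a typical caller (hypothetical; assumes va was entered
          * into pm earlier):
          *
          *	paddr_t pa;
          *	if (!pmap_extract(pm, va, &pa))
          *		panic("no mapping for %#"PRIxVADDR, va);
          */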
   1564 
   1565 /*
   1566  *	Copy the range specified by src_addr/len
   1567  *	from the source map to the range dst_addr/len
   1568  *	in the destination map.
   1569  *
   1570  *	This routine is only advisory and need not do anything.
   1571  */
   1572 void
   1573 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
   1574     vaddr_t src_addr)
   1575 {
   1576 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1577 	PMAP_COUNT(copy);
   1578 }
   1579 
   1580 /*
   1581  *	pmap_clear_reference:
   1582  *
   1583  *	Clear the reference bit on the specified physical page.
   1584  */
   1585 bool
   1586 pmap_clear_reference(struct vm_page *pg)
   1587 {
   1588 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1589 
   1590 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1591 	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR"))",
    1592 	    pg, VM_PAGE_TO_PHYS(pg), 0, 0);
   1593 
   1594 	bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);
   1595 
   1596 	UVMHIST_LOG(pmaphist, " <-- %s", rv ? "true" : "false", 0, 0, 0);
   1597 
   1598 	return rv;
   1599 }
   1600 
   1601 /*
   1602  *	pmap_is_referenced:
   1603  *
   1604  *	Return whether or not the specified physical page is referenced
   1605  *	by any physical maps.
   1606  */
   1607 bool
   1608 pmap_is_referenced(struct vm_page *pg)
   1609 {
   1610 	return VM_PAGEMD_REFERENCED_P(VM_PAGE_TO_MD(pg));
   1611 }
   1612 
   1613 /*
    1614  *	Clear the modify bit on the specified physical page.
   1615  */
   1616 bool
   1617 pmap_clear_modify(struct vm_page *pg)
   1618 {
   1619 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1620 	pv_entry_t pv = &mdpg->mdpg_first;
   1621 	pv_entry_t pv_next;
   1622 
   1623 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1624 	UVMHIST_LOG(pmaphist, "(pg=%p (%#"PRIxPADDR"))",
    1625 	    pg, VM_PAGE_TO_PHYS(pg), 0, 0);
   1626 	PMAP_COUNT(clear_modify);
   1627 
   1628 	if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
   1629 		if (pv->pv_pmap == NULL) {
   1630 			UVMHIST_LOG(pmapexechist,
   1631 			    "pg %p (pa %#"PRIxPADDR"): %s",
   1632 			    pg, VM_PAGE_TO_PHYS(pg), "execpage cleared", 0);
   1633 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1634 			PMAP_COUNT(exec_uncached_clear_modify);
   1635 		} else {
   1636 			UVMHIST_LOG(pmapexechist,
   1637 			    "pg %p (pa %#"PRIxPADDR"): %s",
   1638 			    pg, VM_PAGE_TO_PHYS(pg), "syncicache performed", 0);
   1639 			pmap_page_syncicache(pg);
   1640 			PMAP_COUNT(exec_synced_clear_modify);
   1641 		}
   1642 	}
   1643 	if (!pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED)) {
   1644 		UVMHIST_LOG(pmaphist, " <-- false", 0, 0, 0, 0);
   1645 		return false;
   1646 	}
   1647 	if (pv->pv_pmap == NULL) {
   1648 		UVMHIST_LOG(pmaphist, " <-- true (no mappings)", 0, 0, 0, 0);
   1649 		return true;
   1650 	}
   1651 
    1652 	/*
    1653 	 * Remove write access from any mapping of this page so we can
    1654 	 * tell if it is written to again later.  Flush the VAC first
    1655 	 * if there is one.
    1656 	 */
   1657 	kpreempt_disable();
   1658 	KASSERT(!VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
   1659 	VM_PAGEMD_PVLIST_READLOCK(mdpg);
   1660 	pmap_pvlist_check(mdpg);
   1661 	for (; pv != NULL; pv = pv_next) {
   1662 		pmap_t pmap = pv->pv_pmap;
   1663 		vaddr_t va = trunc_page(pv->pv_va);
   1664 
   1665 		pv_next = pv->pv_next;
   1666 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1667 		if (pv->pv_va & PV_KENTER)
   1668 			continue;
   1669 #endif
   1670 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1671 		KASSERT(ptep);
   1672 		pt_entry_t pte = pte_prot_nowrite(*ptep);
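         		/*
         		 * Added note: pte_prot_nowrite() yields a copy of the
         		 * PTE with write permission removed; if it equals the
         		 * current PTE, this mapping is already read-only and
         		 * the update and TLB invalidation below can be skipped.
         		 */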
   1673 		if (*ptep == pte) {
   1674 			continue;
   1675 		}
   1676 		KASSERT(pte_valid_p(pte));
   1677 		const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1678 		pmap_md_tlb_miss_lock_enter();
   1679 		*ptep = pte;
   1680 		pmap_tlb_invalidate_addr(pmap, va);
   1681 		pmap_md_tlb_miss_lock_exit();
   1682 		pmap_update(pmap);
   1683 		if (__predict_false(gen != VM_PAGEMD_PVLIST_READLOCK(mdpg))) {
   1684 			/*
   1685 			 * The list changed!  So restart from the beginning.
   1686 			 */
   1687 			pv_next = &mdpg->mdpg_first;
   1688 			pmap_pvlist_check(mdpg);
   1689 		}
   1690 	}
   1691 	pmap_pvlist_check(mdpg);
   1692 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1693 	kpreempt_enable();
   1694 
   1695 	UVMHIST_LOG(pmaphist, " <-- true (mappings changed)", 0, 0, 0, 0);
   1696 	return true;
   1697 }
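
         /*
          * Added annotation: the loop above relies on a generation-number
          * idiom that recurs in this file.  A minimal sketch, using the
          * same pv-list lock primitives:
          *
          *	uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
          *	// ... work that may block or change the pv list ...
          *	if (gen != VM_PAGEMD_PVLIST_READLOCK(mdpg)) {
          *		// list changed while unlocked; restart the scan
          *	}
          *
          * Unlocking returns the list's generation; if relocking observes
          * a different value, another thread modified the list meanwhile.
          */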
   1698 
   1699 /*
   1700  *	pmap_is_modified:
   1701  *
   1702  *	Return whether or not the specified physical page is modified
   1703  *	by any physical maps.
   1704  */
   1705 bool
   1706 pmap_is_modified(struct vm_page *pg)
   1707 {
   1708 	return VM_PAGEMD_MODIFIED_P(VM_PAGE_TO_MD(pg));
   1709 }
   1710 
   1711 /*
   1712  *	pmap_set_modified:
   1713  *
    1714  *	Set the modified and referenced attributes on the specified page.
   1715  */
   1716 void
   1717 pmap_set_modified(paddr_t pa)
   1718 {
   1719 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1720 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1721 	pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
   1722 }
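
         /*
          * Added note: pmap_set_modified() is presumably the hook used by
          * machine-dependent write-fault paths (e.g. a TLB modified
          * exception) to record a store to a managed page; note that it
          * sets both the modified and referenced attributes.
          */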
   1723 
   1724 /******************** pv_entry management ********************/
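
         /*
          * Overview (added annotation, inferred from the code below): each
          * managed page's vm_page_md embeds the first pv_entry (mdpg_first);
          * additional mappings of the page are chained through pv_next with
          * pool-allocated entries.  An empty list is marked by
          * mdpg_first.pv_pmap == NULL, and the low bits of pv_va may carry
          * flags (e.g. PV_KENTER), so the VA proper is trunc_page(pv->pv_va).
          */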
   1725 
   1726 static void
   1727 pmap_pvlist_check(struct vm_page_md *mdpg)
   1728 {
   1729 #ifdef DEBUG
   1730 	pv_entry_t pv = &mdpg->mdpg_first;
   1731 	if (pv->pv_pmap != NULL) {
   1732 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1733 		const u_int colormask = uvmexp.colormask;
   1734 		u_int colors = 0;
   1735 #endif
   1736 		for (; pv != NULL; pv = pv->pv_next) {
   1737 			KASSERT(pv->pv_pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(pv->pv_va));
   1738 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1739 			colors |= __BIT(atop(pv->pv_va) & colormask);
   1740 #endif
   1741 		}
   1742 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
    1743 		// Assert that if more than one color is mapped, the
    1744 		// mappings are uncached.
   1745 		KASSERTMSG(!pmap_md_virtual_cache_aliasing_p()
   1746 		    || colors == 0 || (colors & (colors-1)) == 0
   1747 		    || VM_PAGEMD_UNCACHED_P(mdpg), "colors=%#x uncached=%u",
   1748 		    colors, VM_PAGEMD_UNCACHED_P(mdpg));
   1749 #endif
   1750 	}
   1751 #endif /* DEBUG */
   1752 }
   1753 
   1754 /*
   1755  * Enter the pmap and virtual address into the
   1756  * physical to virtual map table.
   1757  */
   1758 void
   1759 pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, pt_entry_t *nptep,
   1760     u_int flags)
   1761 {
   1762 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1763 	pv_entry_t pv, npv, apv;
   1764 #ifdef UVMHIST
   1765 	bool first = false;
   1766 #endif
   1767 
   1768 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1769 	UVMHIST_LOG(pmaphist,
   1770 	    "(pmap=%p va=%#"PRIxVADDR" pg=%p (%#"PRIxPADDR")",
   1771 	    pmap, va, pg, VM_PAGE_TO_PHYS(pg));
   1772 	UVMHIST_LOG(pmaphist, "nptep=%p (%#"PRIxPTE"))",
   1773 	    nptep, pte_value(*nptep), 0, 0);
   1774 
   1775 	KASSERT(kpreempt_disabled());
   1776 	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
   1777 	KASSERTMSG(pmap != pmap_kernel() || !pmap_md_io_vaddr_p(va),
   1778 	    "va %#"PRIxVADDR, va);
   1779 
   1780 	apv = NULL;
   1781 	VM_PAGEMD_PVLIST_LOCK(mdpg);
   1782 again:
   1783 	pv = &mdpg->mdpg_first;
   1784 	pmap_pvlist_check(mdpg);
   1785 	if (pv->pv_pmap == NULL) {
   1786 		KASSERT(pv->pv_next == NULL);
   1787 		/*
   1788 		 * No entries yet, use header as the first entry
   1789 		 */
   1790 		PMAP_COUNT(primary_mappings);
   1791 		PMAP_COUNT(mappings);
   1792 #ifdef UVMHIST
   1793 		first = true;
   1794 #endif
   1795 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1796 		KASSERT(VM_PAGEMD_CACHED_P(mdpg));
    1797 		// If the new mapping has a color incompatible with the
    1798 		// last mapping of this page, clean the page before using it.
   1799 		if (!PMAP_PAGE_COLOROK_P(va, pv->pv_va)) {
   1800 			pmap_md_vca_clean(pg, PMAP_WBINV);
   1801 		}
   1802 #endif
   1803 		pv->pv_pmap = pmap;
   1804 		pv->pv_va = va | flags;
   1805 	} else {
   1806 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1807 		if (pmap_md_vca_add(pg, va, nptep)) {
   1808 			goto again;
   1809 		}
   1810 #endif
   1811 
   1812 		/*
   1813 		 * There is at least one other VA mapping this page.
   1814 		 * Place this entry after the header.
   1815 		 *
   1816 		 * Note: the entry may already be in the table if
   1817 		 * we are only changing the protection bits.
   1818 		 */
   1819 
   1820 #ifdef PARANOIADIAG
   1821 		const paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1822 #endif
   1823 		for (npv = pv; npv; npv = npv->pv_next) {
   1824 			if (pmap == npv->pv_pmap
   1825 			    && va == trunc_page(npv->pv_va)) {
   1826 #ifdef PARANOIADIAG
   1827 				pt_entry_t *ptep = pmap_pte_lookup(pmap, va);
   1828 				pt_entry_t pte = (ptep != NULL) ? *ptep : 0;
   1829 				if (!pte_valid_p(pte) || pte_to_paddr(pte) != pa)
   1830 					printf("%s: found va %#"PRIxVADDR
   1831 					    " pa %#"PRIxPADDR
   1832 					    " in pv_table but != %#"PRIxPTE"\n",
   1833 					    __func__, va, pa, pte_value(pte));
   1834 #endif
   1835 				PMAP_COUNT(remappings);
   1836 				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1837 				if (__predict_false(apv != NULL))
   1838 					pmap_pv_free(apv);
   1839 
   1840 				UVMHIST_LOG(pmaphist, " <-- done pv=%p%s",
   1841 				    pv, " (reused)", 0, 0);
   1842 				return;
   1843 			}
   1844 		}
   1845 		if (__predict_true(apv == NULL)) {
   1846 			/*
    1847 			 * To allocate a PV we must release the PVLIST lock,
    1848 			 * so note the list generation first; allocate the PV,
    1849 			 * then reacquire the lock.
   1850 			 */
   1851 			pmap_pvlist_check(mdpg);
   1852 			const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1853 
   1854 			apv = (pv_entry_t)pmap_pv_alloc();
   1855 			if (apv == NULL)
   1856 				panic("pmap_enter_pv: pmap_pv_alloc() failed");
   1857 
   1858 			/*
   1859 			 * If the generation has changed, then someone else
   1860 			 * tinkered with this page so we should start over.
   1861 			 */
   1862 			if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg))
   1863 				goto again;
   1864 		}
   1865 		npv = apv;
   1866 		apv = NULL;
   1867 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1868 		/*
    1869 		 * If we need to deal with virtual cache aliases, keep mappings
   1870 		 * in the kernel pmap at the head of the list.  This allows
   1871 		 * the VCA code to easily use them for cache operations if
   1872 		 * present.
   1873 		 */
   1874 		pmap_t kpmap = pmap_kernel();
   1875 		if (pmap != kpmap) {
   1876 			while (pv->pv_pmap == kpmap && pv->pv_next != NULL) {
   1877 				pv = pv->pv_next;
   1878 			}
   1879 		}
   1880 #endif
   1881 		npv->pv_va = va | flags;
   1882 		npv->pv_pmap = pmap;
   1883 		npv->pv_next = pv->pv_next;
   1884 		pv->pv_next = npv;
   1885 		PMAP_COUNT(mappings);
   1886 	}
   1887 	pmap_pvlist_check(mdpg);
   1888 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1889 	if (__predict_false(apv != NULL))
   1890 		pmap_pv_free(apv);
   1891 
   1892 	UVMHIST_LOG(pmaphist, " <-- done pv=%p%s",
    1893 	    pv, first ? " (first pv)" : "", 0, 0);
   1894 }
   1895 
   1896 /*
   1897  * Remove a physical to virtual address translation.
   1898  * If cache was inhibited on this page, and there are no more cache
   1899  * conflicts, restore caching.
    1900  * Flush the cache if the last mapping is removed (the page should
    1901  * always be cached at this point).
   1902  */
   1903 void
   1904 pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty)
   1905 {
   1906 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1907 	pv_entry_t pv, npv;
   1908 	bool last;
   1909 
   1910 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1911 	UVMHIST_LOG(pmaphist,
   1912 	    "(pmap=%p, va=%#"PRIxVADDR", pg=%p (pa %#"PRIxPADDR")",
   1913 	    pmap, va, pg, VM_PAGE_TO_PHYS(pg));
   1914 	UVMHIST_LOG(pmaphist, "dirty=%s)", dirty ? "true" : "false", 0, 0, 0);
   1915 
   1916 	KASSERT(kpreempt_disabled());
   1917 	KASSERT((va & PAGE_MASK) == 0);
   1918 	pv = &mdpg->mdpg_first;
   1919 
   1920 	VM_PAGEMD_PVLIST_LOCK(mdpg);
   1921 	pmap_pvlist_check(mdpg);
   1922 
   1923 	/*
   1924 	 * If it is the first entry on the list, it is actually
   1925 	 * in the header and we must copy the following entry up
   1926 	 * to the header.  Otherwise we must search the list for
   1927 	 * the entry.  In either case we free the now unused entry.
   1928 	 */
   1929 
   1930 	last = false;
   1931 	if (pmap == pv->pv_pmap && va == trunc_page(pv->pv_va)) {
   1932 		npv = pv->pv_next;
   1933 		if (npv) {
   1934 			*pv = *npv;
   1935 			KASSERT(pv->pv_pmap != NULL);
   1936 		} else {
   1937 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1938 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
   1939 #endif
   1940 			pv->pv_pmap = NULL;
   1941 			last = true;	/* Last mapping removed */
   1942 		}
   1943 		PMAP_COUNT(remove_pvfirst);
   1944 	} else {
   1945 		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
   1946 			PMAP_COUNT(remove_pvsearch);
   1947 			if (pmap == npv->pv_pmap && va == trunc_page(npv->pv_va))
   1948 				break;
   1949 		}
   1950 		if (npv) {
   1951 			pv->pv_next = npv->pv_next;
   1952 		}
   1953 	}
   1954 
   1955 	pmap_pvlist_check(mdpg);
   1956 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1957 
   1958 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1959 	pmap_md_vca_remove(pg, va, dirty, last);
   1960 #endif
   1961 
   1962 	/*
   1963 	 * Free the pv_entry if needed.
   1964 	 */
   1965 	if (npv)
   1966 		pmap_pv_free(npv);
   1967 	if (VM_PAGEMD_EXECPAGE_P(mdpg) && dirty) {
   1968 		if (last) {
   1969 			/*
   1970 			 * If this was the page's last mapping, we no longer
   1971 			 * care about its execness.
   1972 			 */
   1973 			UVMHIST_LOG(pmapexechist,
   1974 			    "pg %p (pa %#"PRIxPADDR")%s: %s",
   1975 			    pg, VM_PAGE_TO_PHYS(pg),
   1976 			    last ? " [last mapping]" : "",
   1977 			    "execpage cleared");
   1978 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1979 			PMAP_COUNT(exec_uncached_remove);
   1980 		} else {
   1981 			/*
   1982 			 * Someone still has it mapped as an executable page
   1983 			 * so we must sync it.
   1984 			 */
   1985 			UVMHIST_LOG(pmapexechist,
   1986 			    "pg %p (pa %#"PRIxPADDR")%s: %s",
   1987 			    pg, VM_PAGE_TO_PHYS(pg),
   1988 			    last ? " [last mapping]" : "",
   1989 			    "performed syncicache");
   1990 			pmap_page_syncicache(pg);
   1991 			PMAP_COUNT(exec_synced_remove);
   1992 		}
   1993 	}
   1994 
   1995 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1996 }
   1997 
   1998 #if defined(MULTIPROCESSOR)
   1999 struct pmap_pvlist_info {
   2000 	kmutex_t *pli_locks[PAGE_SIZE / 32];
   2001 	volatile u_int pli_lock_refs[PAGE_SIZE / 32];
   2002 	volatile u_int pli_lock_index;
   2003 	u_int pli_lock_mask;
   2004 } pmap_pvlist_info;
   2005 
   2006 void
   2007 pmap_pvlist_lock_init(size_t cache_line_size)
   2008 {
   2009 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
   2010 	const vaddr_t lock_page = uvm_pageboot_alloc(PAGE_SIZE);
   2011 	vaddr_t lock_va = lock_page;
   2012 	if (sizeof(kmutex_t) > cache_line_size) {
   2013 		cache_line_size = roundup2(sizeof(kmutex_t), cache_line_size);
   2014 	}
   2015 	const size_t nlocks = PAGE_SIZE / cache_line_size;
   2016 	KASSERT((nlocks & (nlocks - 1)) == 0);
   2017 	/*
   2018 	 * Now divide the page into a number of mutexes, one per cacheline.
   2019 	 */
   2020 	for (size_t i = 0; i < nlocks; lock_va += cache_line_size, i++) {
   2021 		kmutex_t * const lock = (kmutex_t *)lock_va;
   2022 		mutex_init(lock, MUTEX_DEFAULT, IPL_HIGH);
   2023 		pli->pli_locks[i] = lock;
   2024 	}
   2025 	pli->pli_lock_mask = nlocks - 1;
   2026 }
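
         /*
          * Worked example (added annotation, assuming PAGE_SIZE = 4096 and
          * a 64-byte cache line with sizeof(kmutex_t) <= 64): nlocks =
          * 4096 / 64 = 64 mutexes, each on its own cache line to avoid
          * false sharing, and pli_lock_mask = 0x3f.
          */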
   2027 
   2028 kmutex_t *
   2029 pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
   2030 {
   2031 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
   2032 	kmutex_t *lock = mdpg->mdpg_lock;
   2033 
   2034 	/*
   2035 	 * Allocate a lock on an as-needed basis.  This will hopefully give us
   2036 	 * semi-random distribution not based on page color.
   2037 	 */
   2038 	if (__predict_false(lock == NULL)) {
   2039 		size_t locknum = atomic_add_int_nv(&pli->pli_lock_index, 37);
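         		/*
         		 * Added note: the stride 37 is odd, hence coprime to
         		 * the power-of-two table size, so repeated allocations
         		 * cycle through every lock before reusing one.
         		 */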
   2040 		size_t lockid = locknum & pli->pli_lock_mask;
   2041 		kmutex_t * const new_lock = pli->pli_locks[lockid];
   2042 		/*
   2043 		 * Set the lock.  If some other thread already did, just use
   2044 		 * the one they assigned.
   2045 		 */
   2046 		lock = atomic_cas_ptr(&mdpg->mdpg_lock, NULL, new_lock);
   2047 		if (lock == NULL) {
   2048 			lock = new_lock;
   2049 			atomic_inc_uint(&pli->pli_lock_refs[lockid]);
   2050 		}
   2051 	}
   2052 
   2053 	/*
   2054 	 * Now finally provide the lock.
   2055 	 */
   2056 	return lock;
   2057 }
   2058 #else /* !MULTIPROCESSOR */
   2059 void
   2060 pmap_pvlist_lock_init(size_t cache_line_size)
   2061 {
   2062 	mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_HIGH);
   2063 }
   2064 
   2065 #ifdef MODULAR
   2066 kmutex_t *
   2067 pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
   2068 {
   2069 	/*
   2070 	 * We just use a global lock.
   2071 	 */
   2072 	if (__predict_false(mdpg->mdpg_lock == NULL)) {
   2073 		mdpg->mdpg_lock = &pmap_pvlist_mutex;
   2074 	}
   2075 
   2076 	/*
   2077 	 * Now finally provide the lock.
   2078 	 */
   2079 	return mdpg->mdpg_lock;
   2080 }
   2081 #endif /* MODULAR */
   2082 #endif /* !MULTIPROCESSOR */
   2083 
   2084 /*
   2085  * pmap_pv_page_alloc:
   2086  *
   2087  *	Allocate a page for the pv_entry pool.
   2088  */
   2089 void *
   2090 pmap_pv_page_alloc(struct pool *pp, int flags)
   2091 {
   2092 	struct vm_page * const pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE);
   2093 	if (pg == NULL)
   2094 		return NULL;
   2095 
   2096 	return (void *)pmap_map_poolpage(VM_PAGE_TO_PHYS(pg));
   2097 }
   2098 
   2099 /*
   2100  * pmap_pv_page_free:
   2101  *
   2102  *	Free a pv_entry pool page.
   2103  */
   2104 void
   2105 pmap_pv_page_free(struct pool *pp, void *v)
   2106 {
   2107 	vaddr_t va = (vaddr_t)v;
   2108 
   2109 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
   2110 	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   2111 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   2112 	KASSERT(pg != NULL);
   2113 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   2114 	kpreempt_disable();
   2115 	pmap_md_vca_remove(pg, va, true, true);
   2116 	kpreempt_enable();
   2117 #endif
   2118 	pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
   2119 	uvm_pagefree(pg);
   2120 }
   2121 
   2122 #ifdef PMAP_PREFER
   2123 /*
   2124  * Find first virtual address >= *vap that doesn't cause
   2125  * a cache alias conflict.
   2126  */
   2127 void
   2128 pmap_prefer(vaddr_t foff, vaddr_t *vap, vsize_t sz, int td)
   2129 {
   2130 	vsize_t prefer_mask = ptoa(uvmexp.colormask);
   2131 
   2132 	PMAP_COUNT(prefer_requests);
   2133 
   2134 	prefer_mask |= pmap_md_cache_prefer_mask();
   2135 
   2136 	if (prefer_mask) {
   2137 		vaddr_t	va = *vap;
   2138 		vsize_t d = (foff - va) & prefer_mask;
   2139 		if (d) {
   2140 			if (td)
   2141 				*vap = trunc_page(va - ((-d) & prefer_mask));
   2142 			else
   2143 				*vap = round_page(va + d);
   2144 			PMAP_COUNT(prefer_adjustments);
   2145 		}
   2146 	}
   2147 }
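
         /*
          * Worked example (added annotation, assuming prefer_mask = 0xffff
          * and td = 0): with foff = 0x12000 and *vap = 0x30000,
          * d = (0x12000 - 0x30000) & 0xffff = 0x2000, so *vap becomes
          * round_page(0x30000 + 0x2000) = 0x32000, which now has the same
          * color (offset within the mask) as foff.
          */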
   2148 #endif /* PMAP_PREFER */
   2149 
   2150 #ifdef PMAP_MAP_POOLPAGE
   2151 vaddr_t
   2152 pmap_map_poolpage(paddr_t pa)
   2153 {
   2154 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   2155 	KASSERT(pg);
   2156 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   2157 	pmap_page_set_attributes(mdpg, VM_PAGEMD_POOLPAGE);
   2158 
   2159 	return pmap_md_map_poolpage(pa, NBPG);
   2160 }
   2161 
   2162 paddr_t
   2163 pmap_unmap_poolpage(vaddr_t va)
   2164 {
   2165 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
   2166 	paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   2167 
   2168 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   2169 	KASSERT(pg != NULL);
   2170 	pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
   2171 	pmap_md_unmap_poolpage(va, NBPG);
   2172 
   2173 	return pa;
   2174 }
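
         /*
          * Added note: pmap_map_poolpage() and pmap_unmap_poolpage() are
          * intended to pair: both operate on direct-mapped addresses
          * (asserted above) and toggle VM_PAGEMD_POOLPAGE so pool pages
          * remain distinguishable to the rest of the pmap.
          */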
   2175 #endif /* PMAP_MAP_POOLPAGE */
   2176