/*	$NetBSD: pmap.c,v 1.41 2019/06/19 09:56:17 skrll Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.41 2019/06/19 09:56:17 skrll Exp $");

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidation or protection-reduction
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and as to when physical maps must be made correct.
 */
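
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * per pmap(9), MI callers such as the UVM fault handler drive this
 * module roughly as
 *
 *	error = pmap_enter(map->pmap, va, pa, prot, flags);
 *	if (error == 0)
 *		pmap_update(map->pmap);
 *
 * i.e. mappings are only guaranteed visible once pmap_update() has been
 * called.  "map" is a hypothetical struct vm_map pointer used only for
 * the sketch.
 */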

#include "opt_modular.h"
#include "opt_multiprocessor.h"
#include "opt_sysv.h"

#define __PMAP_PRIVATE

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/cpu.h>
#include <sys/mutex.h>
#include <sys/pool.h>

#include <uvm/uvm.h>
#include <uvm/uvm_physseg.h>

#if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \
    && !defined(PMAP_NO_PV_UNCACHED)
#error PMAP_VIRTUAL_CACHE_ALIASES with MULTIPROCESSOR requires \
 PMAP_NO_PV_UNCACHED to be defined
#endif

PMAP_COUNTER(remove_kernel_calls, "remove kernel calls");
PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped");
PMAP_COUNTER(remove_user_calls, "remove user calls");
PMAP_COUNTER(remove_user_pages, "user pages unmapped");
PMAP_COUNTER(remove_flushes, "remove cache flushes");
PMAP_COUNTER(remove_tlb_ops, "remove tlb ops");
PMAP_COUNTER(remove_pvfirst, "remove pv first");
PMAP_COUNTER(remove_pvsearch, "remove pv search");

PMAP_COUNTER(prefer_requests, "prefer requests");
PMAP_COUNTER(prefer_adjustments, "prefer adjustments");

PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");

PMAP_COUNTER(kenter_pa, "kernel fast mapped pages");
PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)");
PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages");
PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages");

PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable");
PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable");

PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)");
PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)");
PMAP_COUNTER(kernel_mappings, "kernel pages mapped");
PMAP_COUNTER(user_mappings, "user pages mapped");
PMAP_COUNTER(user_mappings_changed, "user mapping changed");
PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed");
PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
PMAP_COUNTER(managed_mappings, "managed pages mapped");
PMAP_COUNTER(mappings, "pages mapped");
PMAP_COUNTER(remappings, "pages remapped");
PMAP_COUNTER(unmappings, "pages unmapped");
PMAP_COUNTER(primary_mappings, "page initial mappings");
PMAP_COUNTER(primary_unmappings, "page final unmappings");
PMAP_COUNTER(tlb_hit, "page mapping");

PMAP_COUNTER(exec_mappings, "exec pages mapped");
PMAP_COUNTER(exec_synced_mappings, "exec pages synced");
PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)");
PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)");
PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)");
PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)");
PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)");
PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)");
PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)");
PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)");
PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)");

PMAP_COUNTER(create, "creates");
PMAP_COUNTER(reference, "references");
PMAP_COUNTER(dereference, "dereferences");
PMAP_COUNTER(destroy, "destroyed");
PMAP_COUNTER(activate, "activations");
PMAP_COUNTER(deactivate, "deactivations");
PMAP_COUNTER(update, "updates");
#ifdef MULTIPROCESSOR
PMAP_COUNTER(shootdown_ipis, "shootdown IPIs");
#endif
PMAP_COUNTER(unwire, "unwires");
PMAP_COUNTER(copy, "copies");
PMAP_COUNTER(clear_modify, "clear_modifies");
PMAP_COUNTER(protect, "protects");
PMAP_COUNTER(page_protect, "page_protects");

#define PMAP_ASID_RESERVED 0
CTASSERT(PMAP_ASID_RESERVED == 0);

#ifndef PMAP_SEGTAB_ALIGN
#define PMAP_SEGTAB_ALIGN	/* nothing */
#endif
#ifdef _LP64
pmap_segtab_t	pmap_kstart_segtab PMAP_SEGTAB_ALIGN; /* first mid-level segtab for kernel */
#endif
pmap_segtab_t	pmap_kern_segtab PMAP_SEGTAB_ALIGN = { /* top level segtab for kernel */
#ifdef _LP64
	.seg_seg[(VM_MIN_KERNEL_ADDRESS & XSEGOFSET) >> SEGSHIFT] = &pmap_kstart_segtab,
#endif
};

struct pmap_kernel kernel_pmap_store = {
	.kernel_pmap = {
		.pm_count = 1,
		.pm_segtab = &pmap_kern_segtab,
		.pm_minaddr = VM_MIN_KERNEL_ADDRESS,
		.pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
	},
};

struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;

struct pmap_limits pmap_limits = {	/* VA and PA limits */
	.virtual_start = VM_MIN_KERNEL_ADDRESS,
};

#ifdef UVMHIST
static struct kern_history_ent pmapexechistbuf[10000];
static struct kern_history_ent pmaphistbuf[10000];
UVMHIST_DEFINE(pmapexechist);
UVMHIST_DEFINE(pmaphist);
#endif

/*
 * The pools from which pmap structures and sub-structures are allocated.
 */
struct pool pmap_pmap_pool;
struct pool pmap_pv_pool;

#ifndef PMAP_PV_LOWAT
#define	PMAP_PV_LOWAT	16
#endif
int	pmap_pv_lowat = PMAP_PV_LOWAT;

bool	pmap_initialized = false;
#define	PMAP_PAGE_COLOROK_P(a, b) \
		((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0)
u_int	pmap_page_colormask;

#define PAGE_IS_MANAGED(pa)	(pmap_initialized && uvm_pageismanaged(pa))

#define PMAP_IS_ACTIVE(pm)						\
	((pm) == pmap_kernel() ||					\
	 (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)

/* Forward function declarations */
void pmap_page_remove(struct vm_page *);
static void pmap_pvlist_check(struct vm_page_md *);
void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, pt_entry_t *, u_int);

/*
 * PV table management functions.
 */
void	*pmap_pv_page_alloc(struct pool *, int);
void	pmap_pv_page_free(struct pool *, void *);

struct pool_allocator pmap_pv_page_allocator = {
	pmap_pv_page_alloc, pmap_pv_page_free, 0,
};

#define	pmap_pv_alloc()		pool_get(&pmap_pv_pool, PR_NOWAIT)
#define	pmap_pv_free(pv)	pool_put(&pmap_pv_pool, (pv))

#if !defined(MULTIPROCESSOR) || !defined(PMAP_MD_NEED_TLB_MISS_LOCK)
#define	pmap_md_tlb_miss_lock_enter()	do { } while(/*CONSTCOND*/0)
#define	pmap_md_tlb_miss_lock_exit()	do { } while(/*CONSTCOND*/0)
#endif /* !MULTIPROCESSOR || !PMAP_MD_NEED_TLB_MISS_LOCK */

#ifndef MULTIPROCESSOR
kmutex_t pmap_pvlist_mutex	__cacheline_aligned;
#endif

/*
 * Debug functions.
 */

#ifdef DEBUG
static inline void
pmap_asid_check(pmap_t pm, const char *func)
{
	if (!PMAP_IS_ACTIVE(pm))
		return;

	struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(curcpu()));
	tlb_asid_t asid = tlb_get_asid();
	if (asid != pai->pai_asid)
		panic("%s: inconsistency for active TLB update: %u <-> %u",
		    func, asid, pai->pai_asid);
}
#endif

static void
pmap_addr_range_check(pmap_t pmap, vaddr_t sva, vaddr_t eva, const char *func)
{
#ifdef DEBUG
	if (pmap == pmap_kernel()) {
		if (sva < VM_MIN_KERNEL_ADDRESS)
			panic("%s: kva %#"PRIxVADDR" not in range",
			    func, sva);
		if (eva >= pmap_limits.virtual_end)
			panic("%s: kva %#"PRIxVADDR" not in range",
			    func, eva);
	} else {
		if (eva > VM_MAXUSER_ADDRESS)
			panic("%s: uva %#"PRIxVADDR" not in range",
			    func, eva);
		pmap_asid_check(pmap, func);
	}
#endif
}

/*
 * Misc. functions.
 */

bool
pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes)
{
	volatile unsigned long * const attrp = &mdpg->mdpg_attrs;
#ifdef MULTIPROCESSOR
	for (;;) {
		unsigned long old_attr = *attrp;
		if ((old_attr & clear_attributes) == 0)
			return false;
		unsigned long new_attr = old_attr & ~clear_attributes;
		if (old_attr == atomic_cas_ulong(attrp, old_attr, new_attr))
			return true;
	}
#else
	unsigned long old_attr = *attrp;
	if ((old_attr & clear_attributes) == 0)
		return false;
	*attrp &= ~clear_attributes;
	return true;
#endif
}
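
/*
 * Editor's note: the MULTIPROCESSOR path above is a standard lock-free
 * read/modify/compare-and-swap retry loop.  A minimal sketch of the same
 * pattern, assuming only <sys/atomic.h>:
 *
 *	for (;;) {
 *		unsigned long old = *p;
 *		unsigned long new = old & ~bits;
 *		if (atomic_cas_ulong(p, old, new) == old)
 *			break;		// no other CPU raced with us
 *	}
 *
 * atomic_cas_ulong() returns the previous value of *p, so equality with
 * "old" proves the swap happened atomically.
 */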

void
pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes)
{
#ifdef MULTIPROCESSOR
	atomic_or_ulong(&mdpg->mdpg_attrs, set_attributes);
#else
	mdpg->mdpg_attrs |= set_attributes;
#endif
}

static void
pmap_page_syncicache(struct vm_page *pg)
{
#ifndef MULTIPROCESSOR
	struct pmap * const curpmap = curlwp->l_proc->p_vmspace->vm_map.pmap;
#endif
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv = &mdpg->mdpg_first;
	kcpuset_t *onproc;
#ifdef MULTIPROCESSOR
	kcpuset_create(&onproc, true);
	KASSERT(onproc != NULL);
#else
	onproc = NULL;
#endif
	VM_PAGEMD_PVLIST_READLOCK(mdpg);
	pmap_pvlist_check(mdpg);

	if (pv->pv_pmap != NULL) {
		for (; pv != NULL; pv = pv->pv_next) {
#ifdef MULTIPROCESSOR
			kcpuset_merge(onproc, pv->pv_pmap->pm_onproc);
			if (kcpuset_match(onproc, kcpuset_running)) {
				break;
			}
#else
			if (pv->pv_pmap == curpmap) {
				onproc = curcpu()->ci_data.cpu_kcpuset;
				break;
			}
#endif
		}
	}
	pmap_pvlist_check(mdpg);
	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	kpreempt_disable();
	pmap_md_page_syncicache(pg, onproc);
	kpreempt_enable();
#ifdef MULTIPROCESSOR
	kcpuset_destroy(onproc);
#endif
}
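
/*
 * Editor's note: on MULTIPROCESSOR kernels the function above accumulates
 * a kcpuset of every CPU that currently has one of the page's pmaps on
 * proc, so pmap_md_page_syncicache() can restrict icache-sync IPIs to
 * those CPUs (stopping early once every running CPU is included).  On
 * uniprocessor kernels "onproc" degenerates to the current CPU's kcpuset,
 * or NULL when the page is not mapped by the current pmap.
 */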

/*
 * Define the initial bounds of the kernel virtual address space.
 */
void
pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
{

	*vstartp = pmap_limits.virtual_start;
	*vendp = pmap_limits.virtual_end;
}

vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
	vaddr_t virtual_end = pmap_limits.virtual_end;
	maxkvaddr = pmap_round_seg(maxkvaddr) - 1;

	/*
	 * Reserve PTEs for the new KVA space.
	 */
	for (; virtual_end < maxkvaddr; virtual_end += NBSEG) {
		pmap_pte_reserve(pmap_kernel(), virtual_end, 0);
	}

	/*
	 * Don't exceed VM_MAX_KERNEL_ADDRESS!
	 */
	if (virtual_end == 0 || virtual_end > VM_MAX_KERNEL_ADDRESS)
		virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Update new end.
	 */
	pmap_limits.virtual_end = virtual_end;
	return virtual_end;
}
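
/*
 * Editor's note (hedged sketch): pmap_growkernel() is normally driven by
 * the MI kernel-map code when an allocation passes the current limit, in
 * the style of
 *
 *	if (uvm_maxkaddr < needed_va)
 *		uvm_maxkaddr = pmap_growkernel(needed_va);
 *
 * "needed_va" is a placeholder; the exact call site lives in the MI VM
 * code (uvm_km.c), not in this file.
 */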

/*
 * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
 * This function allows for early dynamic memory allocation until the virtual
 * memory system has been bootstrapped.  After that point, either kmem_alloc
 * or malloc should be used.  This function works by stealing pages from the
 * (to be) managed page pool, then implicitly mapping the pages (by using
 * their direct mapped addresses) and zeroing them.
 *
 * It may be used once the physical memory segments have been pre-loaded
 * into the vm_physmem[] array.  Early memory allocation MUST use this
 * interface!  This cannot be used after vm_page_startup(), and will
 * generate a panic if tried.
 *
 * Note that this memory will never be freed, and in essence it is wired
 * down.
 *
 * We must adjust *vstartp and/or *vendp iff we use address space
 * from the kernel virtual address range defined by pmap_virtual_space().
 */
vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
	size_t npgs;
	paddr_t pa;
	vaddr_t va;

	uvm_physseg_t maybe_bank = UVM_PHYSSEG_TYPE_INVALID;

	size = round_page(size);
	npgs = atop(size);

	aprint_debug("%s: need %zu pages\n", __func__, npgs);

	for (uvm_physseg_t bank = uvm_physseg_get_first();
	     uvm_physseg_valid_p(bank);
	     bank = uvm_physseg_get_next(bank)) {

		if (uvm.page_init_done)
			panic("pmap_steal_memory: called _after_ bootstrap");

		aprint_debug("%s: seg %"PRIxPHYSSEG": %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
		    __func__, bank,
		    uvm_physseg_get_avail_start(bank), uvm_physseg_get_start(bank),
		    uvm_physseg_get_avail_end(bank), uvm_physseg_get_end(bank));

		if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank)
		    || uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) {
			aprint_debug("%s: seg %"PRIxPHYSSEG": bad start\n", __func__, bank);
			continue;
		}

		if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < npgs) {
			aprint_debug("%s: seg %"PRIxPHYSSEG": too small for %zu pages\n",
			    __func__, bank, npgs);
			continue;
		}

		if (!pmap_md_ok_to_steal_p(bank, npgs)) {
			continue;
		}

		/*
		 * Always try to allocate from the segment with the least
		 * amount of space left.
		 */
#define VM_PHYSMEM_SPACE(b)	((uvm_physseg_get_avail_end(b)) - (uvm_physseg_get_avail_start(b)))
		if (!uvm_physseg_valid_p(maybe_bank)
		    || VM_PHYSMEM_SPACE(bank) < VM_PHYSMEM_SPACE(maybe_bank)) {
			maybe_bank = bank;
		}
	}

	if (uvm_physseg_valid_p(maybe_bank)) {
		const uvm_physseg_t bank = maybe_bank;

		/*
		 * There are enough pages here; steal them!
		 */
		pa = ptoa(uvm_physseg_get_start(bank));
		uvm_physseg_unplug(atop(pa), npgs);

		aprint_debug("%s: seg %"PRIxPHYSSEG": %zu pages stolen (%#"PRIxPADDR" left)\n",
		    __func__, bank, npgs, VM_PHYSMEM_SPACE(bank));

		va = pmap_md_map_poolpage(pa, size);
		memset((void *)va, 0, size);
		return va;
	}

	/*
	 * If we got here, there was no memory left.
	 */
	panic("pmap_steal_memory: no memory to steal %zu pages", npgs);
}
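
/*
 * Editor's note (illustrative sketch, not from the original source): MD
 * bootstrap code allocates early wired memory roughly as
 *
 *	vaddr_t vstart, vend;
 *	vaddr_t va = pmap_steal_memory(USPACE, &vstart, &vend);
 *
 * where "USPACE" stands in for any early allocation size.  With this
 * implementation the hint pointers are accepted but never adjusted,
 * since stolen pages are reached through the direct map rather than
 * through kernel virtual address space.
 */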

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{
	UVMHIST_INIT_STATIC(pmapexechist, pmapexechistbuf);
	UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);

	/*
	 * Initialize the segtab lock.
	 */
	mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH);

	/*
	 * Set a low water mark on the pv_entry pool, so that we are
	 * more likely to have these around even in extreme memory
	 * starvation.
	 */
	pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);

	/*
	 * Set the page colormask but allow pmap_md_init to override it.
	 */
	pmap_page_colormask = ptoa(uvmexp.colormask);

	pmap_md_init();

	/*
	 * Now it is safe to enable pv entry recording.
	 */
	pmap_initialized = true;
}

/*
 *	Create and return a physical map.
 *
 *	The returned map is an actual physical map and may be
 *	referenced by the hardware; it starts with a reference
 *	count of one and covers the user address range.
 */
pmap_t
pmap_create(void)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	PMAP_COUNT(create);

	pmap_t pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
	memset(pmap, 0, PMAP_SIZE);

	KASSERT(pmap->pm_pai[0].pai_link.le_prev == NULL);

	pmap->pm_count = 1;
	pmap->pm_minaddr = VM_MIN_ADDRESS;
	pmap->pm_maxaddr = VM_MAXUSER_ADDRESS;

	pmap_segtab_init(pmap);

#ifdef MULTIPROCESSOR
	kcpuset_create(&pmap->pm_active, true);
	kcpuset_create(&pmap->pm_onproc, true);
	KASSERT(pmap->pm_active != NULL);
	KASSERT(pmap->pm_onproc != NULL);
#endif

	UVMHIST_LOG(pmaphist, " <-- done (pmap=%#jx)", (uintptr_t)pmap,
	    0, 0, 0);

	return pmap;
}
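
/*
 * Editor's note (lifetime sketch, derived from the code above and below):
 *
 *	pmap_t pm = pmap_create();	// pm_count == 1
 *	pmap_reference(pm);		// shared: pm_count == 2
 *	...
 *	pmap_destroy(pm);		// drops one reference
 *	pmap_destroy(pm);		// last ref: ASIDs and segtab freed
 *
 * Only the final pmap_destroy() actually tears the pmap down.
 */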

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);

	if (atomic_dec_uint_nv(&pmap->pm_count) > 0) {
		PMAP_COUNT(dereference);
		UVMHIST_LOG(pmaphist, " <-- done (deref)", 0, 0, 0, 0);
		return;
	}

	PMAP_COUNT(destroy);
	KASSERT(pmap->pm_count == 0);
	kpreempt_disable();
	pmap_md_tlb_miss_lock_enter();
	pmap_tlb_asid_release_all(pmap);
	pmap_segtab_destroy(pmap, NULL, 0);
	pmap_md_tlb_miss_lock_exit();

#ifdef MULTIPROCESSOR
	kcpuset_destroy(pmap->pm_active);
	kcpuset_destroy(pmap->pm_onproc);
	pmap->pm_active = NULL;
	pmap->pm_onproc = NULL;
#endif

	pool_put(&pmap_pmap_pool, pmap);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done (freed)", 0, 0, 0, 0);
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
	PMAP_COUNT(reference);

	if (pmap != NULL) {
		atomic_inc_uint(&pmap->pm_count);
	}

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

/*
 *	Make a new pmap (vmspace) active for the given process.
 */
void
pmap_activate(struct lwp *l)
{
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
	    (uintptr_t)pmap, 0, 0);
	PMAP_COUNT(activate);

	kpreempt_disable();
	pmap_md_tlb_miss_lock_enter();
	pmap_tlb_asid_acquire(pmap, l);
	if (l == curlwp) {
		pmap_segtab_activate(pmap, l);
	}
	pmap_md_tlb_miss_lock_exit();
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
	    l->l_lid, 0, 0);
}

/*
 * Remove this page from all physical maps in which it resides.
 * Reflects back modify bits to the pager.
 */
void
pmap_page_remove(struct vm_page *pg)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

	kpreempt_disable();
	VM_PAGEMD_PVLIST_LOCK(mdpg);
	pmap_pvlist_check(mdpg);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);

	UVMHIST_LOG(pmapexechist, "pg %#jx (pa %#jx) [page removed]: "
	    "execpage cleared", (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
	pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE|VM_PAGEMD_UNCACHED);
#else
	pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
#endif
	PMAP_COUNT(exec_uncached_remove);

	pv_entry_t pv = &mdpg->mdpg_first;
	if (pv->pv_pmap == NULL) {
		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
		kpreempt_enable();
		UVMHIST_LOG(pmaphist, " <-- done (empty)", 0, 0, 0, 0);
		return;
	}

	pv_entry_t npv;
	pv_entry_t pvp = NULL;

	for (; pv != NULL; pv = npv) {
		npv = pv->pv_next;
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		if (pv->pv_va & PV_KENTER) {
			UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %jx"
			    " skip", (uintptr_t)pv, (uintptr_t)pv->pv_pmap,
			    pv->pv_va, 0);

			KASSERT(pv->pv_pmap == pmap_kernel());

			/*
			 * Assume this is the last PV_KENTER pv; the link
			 * will be fixed up if another one follows.
			 */
			pv->pv_next = NULL;

			/*
			 * pvp is non-null when we already have a PV_KENTER
			 * pv in pvh_first; otherwise we haven't seen a
			 * PV_KENTER pv and we need to copy this one to
			 * pvh_first
			 */
			if (pvp) {
				/*
				 * The previous PV_KENTER pv needs to point to
				 * this PV_KENTER pv
				 */
				pvp->pv_next = pv;
			} else {
				pv_entry_t fpv = &mdpg->mdpg_first;
				*fpv = *pv;
				KASSERT(fpv->pv_pmap == pmap_kernel());
			}
			pvp = pv;
			continue;
		}
#endif
		const pmap_t pmap = pv->pv_pmap;
		vaddr_t va = trunc_page(pv->pv_va);
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
		    pmap_limits.virtual_end);
		pt_entry_t pte = *ptep;
		UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %jx"
		    " pte %jx", (uintptr_t)pv, (uintptr_t)pmap, va,
		    pte_value(pte));
		if (!pte_valid_p(pte))
			continue;
		const bool is_kernel_pmap_p = (pmap == pmap_kernel());
		if (is_kernel_pmap_p) {
			PMAP_COUNT(remove_kernel_pages);
		} else {
			PMAP_COUNT(remove_user_pages);
		}
		if (pte_wired_p(pte))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;

		pmap_md_tlb_miss_lock_enter();
		const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
		pte_set(ptep, npte);
		if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
			/*
			 * Flush the TLB for the given address.
			 */
			pmap_tlb_invalidate_addr(pmap, va);
		}
		pmap_md_tlb_miss_lock_exit();

		/*
		 * A non-null pvp means this pv is not the embedded
		 * pvh_first entry, so it must be freed.
		 */
		if (pvp) {
			KASSERT(pvp->pv_pmap == pmap_kernel());
			KASSERT(pvp->pv_next == NULL);
			pmap_pv_free(pv);
		} else {
			pv->pv_pmap = NULL;
			pv->pv_next = NULL;
		}
	}

	pmap_pvlist_check(mdpg);
	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}
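
/*
 * Editor's note: pmap_page_remove() is the "remove all mappings" leg of
 * pmap_page_protect() below, so the pager effectively performs
 *
 *	pmap_page_protect(pg, VM_PROT_NONE);	// ends up here
 *
 * after which the page has no valid pmap_enter()ed mappings left; the
 * PV_KENTER entries created by pmap_kenter_pa() are deliberately kept.
 */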

/*
 *	Make a previously active pmap (vmspace) inactive.
 */
void
pmap_deactivate(struct lwp *l)
{
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
	    (uintptr_t)pmap, 0, 0);
	PMAP_COUNT(deactivate);

	kpreempt_disable();
	KASSERT(l == curlwp || l->l_cpu == curlwp->l_cpu);
	pmap_md_tlb_miss_lock_enter();
	curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
#ifdef _LP64
	curcpu()->ci_pmap_user_seg0tab = NULL;
#endif
	pmap_tlb_asid_deactivate(pmap);
	pmap_md_tlb_miss_lock_exit();
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
	    l->l_lid, 0, 0);
}

void
pmap_update(struct pmap *pmap)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
	PMAP_COUNT(update);

	kpreempt_disable();
#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
	u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0);
	if (pending && pmap_tlb_shootdown_bystanders(pmap))
		PMAP_COUNT(shootdown_ipis);
#endif
	pmap_md_tlb_miss_lock_enter();
#if defined(DEBUG) && !defined(MULTIPROCESSOR)
	pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
#endif /* DEBUG */

	/*
	 * If pmap_remove_all was called, we deactivated ourselves and nuked
	 * our ASID.  Now we have to reactivate ourselves.
	 */
	if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) {
		pmap->pm_flags ^= PMAP_DEFERRED_ACTIVATE;
		pmap_tlb_asid_acquire(pmap, curlwp);
		pmap_segtab_activate(pmap, curlwp);
	}
	pmap_md_tlb_miss_lock_exit();
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done (kernel=%#jx)",
		    (pmap == pmap_kernel() ? 1 : 0), 0, 0, 0);
}
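
/*
 * Editor's note (hedged sketch): the PMAP_DEFERRED_ACTIVATE handling
 * above pairs with pmap_remove_all(); a bulk teardown is expected to run
 *
 *	pmap_remove_all(pm);		// sheds the ASID, defers reactivation
 *	pmap_remove(pm, sva, eva);	// cheap: no per-page TLB invalidates
 *	pmap_update(pm);		// reacquires ASID, reactivates segtab
 *
 * which is why pmap_pte_remove() skips pmap_tlb_invalidate_addr() while
 * the flag is set.
 */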

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */

static bool
pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
	uintptr_t flags)
{
	const pt_entry_t npte = flags;
	const bool is_kernel_pmap_p = (pmap == pmap_kernel());

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%#jx kernel=%c va=%#jx..%#jx)",
	    (uintptr_t)pmap, (is_kernel_pmap_p ? 1 : 0), sva, eva);
	UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx",
	    (uintptr_t)ptep, flags, 0, 0);

	KASSERT(kpreempt_disabled());

	for (; sva < eva; sva += NBPG, ptep++) {
		const pt_entry_t pte = *ptep;
		if (!pte_valid_p(pte))
			continue;
		if (is_kernel_pmap_p) {
			PMAP_COUNT(remove_kernel_pages);
		} else {
			PMAP_COUNT(remove_user_pages);
		}
		if (pte_wired_p(pte))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
		if (__predict_true(pg != NULL)) {
			pmap_remove_pv(pmap, sva, pg, pte_modified_p(pte));
		}
		pmap_md_tlb_miss_lock_enter();
		pte_set(ptep, npte);
		if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
			/*
			 * Flush the TLB for the given address.
			 */
			pmap_tlb_invalidate_addr(pmap, sva);
		}
		pmap_md_tlb_miss_lock_exit();
	}

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);

	return false;
}

void
pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
{
	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
	const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%#jx, va=%#jx..%#jx)",
	    (uintptr_t)pmap, sva, eva, 0);

	if (is_kernel_pmap_p) {
		PMAP_COUNT(remove_kernel_calls);
	} else {
		PMAP_COUNT(remove_user_calls);
	}
#ifdef PMAP_FAULTINFO
	curpcb->pcb_faultinfo.pfi_faultaddr = 0;
	curpcb->pcb_faultinfo.pfi_repeats = 0;
	curpcb->pcb_faultinfo.pfi_faultpte = NULL;
#endif
	kpreempt_disable();
	pmap_addr_range_check(pmap, sva, eva, __func__);
	pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

/*
 *	pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv;
	vaddr_t va;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pg=%#jx (pa %#jx) prot=%#jx)",
	    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), prot, 0);
	PMAP_COUNT(page_protect);

	switch (prot) {
	case VM_PROT_READ|VM_PROT_WRITE:
	case VM_PROT_ALL:
		break;

	/* copy_on_write */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pv = &mdpg->mdpg_first;
		kpreempt_disable();
		VM_PAGEMD_PVLIST_READLOCK(mdpg);
		pmap_pvlist_check(mdpg);
		/*
		 * Loop over all current mappings setting/clearing as
		 * appropriate.
		 */
		if (pv->pv_pmap != NULL) {
			while (pv != NULL) {
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
				if (pv->pv_va & PV_KENTER) {
					pv = pv->pv_next;
					continue;
				}
#endif
				const pmap_t pmap = pv->pv_pmap;
				va = trunc_page(pv->pv_va);
				const uintptr_t gen =
				    VM_PAGEMD_PVLIST_UNLOCK(mdpg);
				pmap_protect(pmap, va, va + PAGE_SIZE, prot);
				KASSERT(pv->pv_pmap == pmap);
				pmap_update(pmap);
				if (gen != VM_PAGEMD_PVLIST_READLOCK(mdpg)) {
					pv = &mdpg->mdpg_first;
				} else {
					pv = pv->pv_next;
				}
				pmap_pvlist_check(mdpg);
			}
		}
		pmap_pvlist_check(mdpg);
		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
		kpreempt_enable();
		break;

	/* remove_all */
	default:
		pmap_page_remove(pg);
	}

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}
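
/*
 * Editor's note: the read-only cases above are the copy-on-write path; a
 * minimal sketch of the caller's view, assuming a managed page pg:
 *
 *	pmap_page_protect(pg, VM_PROT_READ);	// write-protect every mapping
 *
 * Each mapping is downgraded via pmap_protect(); because the pv-list lock
 * is dropped around that call, the generation count is used to restart
 * the walk whenever the list changed underneath us.
 */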

static bool
pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
	uintptr_t flags)
{
	const vm_prot_t prot = (flags & VM_PROT_ALL);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%#jx kernel=%jx va=%#jx..%#jx)",
	    (uintptr_t)pmap, (pmap == pmap_kernel() ? 1 : 0), sva, eva);
	UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)",
	    (uintptr_t)ptep, flags, 0, 0);

	KASSERT(kpreempt_disabled());
	/*
	 * Change protection on every valid mapping within this segment.
	 */
	for (; sva < eva; sva += NBPG, ptep++) {
		pt_entry_t pte = *ptep;
		if (!pte_valid_p(pte))
			continue;
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
		if (pg != NULL && pte_modified_p(pte)) {
			struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
			if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
				KASSERT(mdpg->mdpg_first.pv_pmap != NULL);
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
				if (VM_PAGEMD_CACHED_P(mdpg)) {
#endif
					UVMHIST_LOG(pmapexechist,
					    "pg %#jx (pa %#jx): "
					    "syncicache performed",
					    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg),
					    0, 0);
					pmap_page_syncicache(pg);
					PMAP_COUNT(exec_synced_protect);
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
				}
#endif
			}
		}
		pte = pte_prot_downgrade(pte, prot);
		if (*ptep != pte) {
			pmap_md_tlb_miss_lock_enter();
			pte_set(ptep, pte);
			/*
			 * Update the TLB if needed.
			 */
			pmap_tlb_update_addr(pmap, sva, pte, PMAP_TLB_NEED_IPI);
			pmap_md_tlb_miss_lock_exit();
		}
	}

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);

	return false;
}

/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%#jx, va=%#jx..%#jx, prot=%ju)",
	    (uintptr_t)pmap, sva, eva, prot);
	PMAP_COUNT(protect);

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
		return;
	}

	/*
	 * Change protection on every valid mapping within this segment.
	 */
	kpreempt_disable();
	pmap_addr_range_check(pmap, sva, eva, __func__);
	pmap_pte_process(pmap, sva, eva, pmap_pte_protect, prot);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

#if defined(PMAP_VIRTUAL_CACHE_ALIASES) && !defined(PMAP_NO_PV_UNCACHED)
/*
 *	pmap_page_cache:
 *
 *	Change all mappings of a managed page to cached/uncached.
 */
void
pmap_page_cache(struct vm_page *pg, bool cached)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pg=%#jx (pa %#jx) cached=%jd)",
	    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), cached, 0);

	KASSERT(kpreempt_disabled());
	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));

	if (cached) {
		pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
		PMAP_COUNT(page_cache_restorations);
	} else {
		pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED);
		PMAP_COUNT(page_cache_evictions);
	}

	for (pv_entry_t pv = &mdpg->mdpg_first; pv != NULL; pv = pv->pv_next) {
		pmap_t pmap = pv->pv_pmap;
		vaddr_t va = trunc_page(pv->pv_va);

		KASSERT(pmap != NULL);
		KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		if (ptep == NULL)
			continue;
		pt_entry_t pte = *ptep;
		if (pte_valid_p(pte)) {
			pte = pte_cached_change(pte, cached);
			pmap_md_tlb_miss_lock_enter();
			pte_set(ptep, pte);
			pmap_tlb_update_addr(pmap, va, pte, PMAP_TLB_NEED_IPI);
			pmap_md_tlb_miss_lock_exit();
		}
	}

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}
#endif	/* PMAP_VIRTUAL_CACHE_ALIASES && !PMAP_NO_PV_UNCACHED */

/*
 *	Insert the given physical page (pa) at
 *	the specified virtual address (va) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
int
pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	const bool wired = (flags & PMAP_WIRED) != 0;
	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
	u_int update_flags = (flags & VM_PROT_ALL) != 0 ? PMAP_TLB_INSERT : 0;
#ifdef UVMHIST
	struct kern_history * const histp =
	    ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist);
#endif

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(*histp);
	UVMHIST_LOG(*histp, "(pmap=%#jx, va=%#jx, pa=%#jx",
	    (uintptr_t)pmap, va, pa, 0);
	UVMHIST_LOG(*histp, "prot=%#jx flags=%#jx)", prot, flags, 0, 0);

	const bool good_color = PMAP_PAGE_COLOROK_P(pa, va);
	if (is_kernel_pmap_p) {
		PMAP_COUNT(kernel_mappings);
		if (!good_color)
			PMAP_COUNT(kernel_mappings_bad);
	} else {
		PMAP_COUNT(user_mappings);
		if (!good_color)
			PMAP_COUNT(user_mappings_bad);
	}
	pmap_addr_range_check(pmap, va, va, __func__);

	KASSERTMSG(prot & VM_PROT_READ, "no READ (%#x) in prot %#x",
	    VM_PROT_READ, prot);

	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
	struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);

	if (pg) {
		/* Set page referenced/modified status based on flags */
		if (flags & VM_PROT_WRITE) {
			pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
		} else if (flags & VM_PROT_ALL) {
			pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
		}

#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		if (!VM_PAGEMD_CACHED_P(mdpg)) {
			flags |= PMAP_NOCACHE;
			PMAP_COUNT(uncached_mappings);
		}
#endif

		PMAP_COUNT(managed_mappings);
	} else {
		/*
		 * Assumption: if it is not part of our managed memory
		 * then it must be device memory which may be volatile.
		 */
		if ((flags & PMAP_CACHE_MASK) == 0)
			flags |= PMAP_NOCACHE;
		PMAP_COUNT(unmanaged_mappings);
	}

	pt_entry_t npte = pte_make_enter(pa, mdpg, prot, flags,
	    is_kernel_pmap_p);

	kpreempt_disable();

	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags);
	if (__predict_false(ptep == NULL)) {
		kpreempt_enable();
		UVMHIST_LOG(*histp, " <-- ENOMEM", 0, 0, 0, 0);
		return ENOMEM;
	}
	const pt_entry_t opte = *ptep;
	const bool resident = pte_valid_p(opte);
	bool remap = false;
	if (resident) {
		if (pte_to_paddr(opte) != pa) {
			KASSERT(!is_kernel_pmap_p);
			const pt_entry_t rpte = pte_nv_entry(false);

			pmap_addr_range_check(pmap, va, va + NBPG, __func__);
			pmap_pte_process(pmap, va, va + NBPG, pmap_pte_remove,
			    rpte);
			PMAP_COUNT(user_mappings_changed);
			remap = true;
		}
		update_flags |= PMAP_TLB_NEED_IPI;
	}

	if (!resident || remap) {
		pmap->pm_stats.resident_count++;
	}

	/* Done after case that may sleep/return. */
	if (pg)
		pmap_enter_pv(pmap, va, pg, &npte, 0);

	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * MIPS pages in a MACH page.
	 */
	if (wired) {
		pmap->pm_stats.wired_count++;
		npte = pte_wire_entry(npte);
	}

	UVMHIST_LOG(*histp, "new pte %#jx (pa %#jx)",
	    pte_value(npte), pa, 0, 0);

	KASSERT(pte_valid_p(npte));

	pmap_md_tlb_miss_lock_enter();
	pte_set(ptep, npte);
	pmap_tlb_update_addr(pmap, va, npte, update_flags);
	pmap_md_tlb_miss_lock_exit();
	kpreempt_enable();

	if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) {
		KASSERT(mdpg != NULL);
		PMAP_COUNT(exec_mappings);
		if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) {
			if (!pte_deferred_exec_p(npte)) {
				UVMHIST_LOG(*histp, "va=%#jx pg %#jx: "
				    "immediate syncicache",
				    va, (uintptr_t)pg, 0, 0);
				pmap_page_syncicache(pg);
				pmap_page_set_attributes(mdpg,
				    VM_PAGEMD_EXECPAGE);
				PMAP_COUNT(exec_synced_mappings);
			} else {
				UVMHIST_LOG(*histp, "va=%#jx pg %#jx: defer "
				    "syncicache: pte %#jx",
				    va, (uintptr_t)pg, npte, 0);
			}
		} else {
			UVMHIST_LOG(*histp,
			    "va=%#jx pg %#jx: no syncicache cached %jd",
			    va, (uintptr_t)pg, pte_cached_p(npte), 0);
		}
	} else if (pg != NULL && (prot & VM_PROT_EXECUTE)) {
		KASSERT(mdpg != NULL);
		KASSERT(prot & VM_PROT_WRITE);
		PMAP_COUNT(exec_mappings);
		pmap_page_syncicache(pg);
		pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
		UVMHIST_LOG(*histp,
		    "va=%#jx pg %#jx: immediate syncicache (writeable)",
		    va, (uintptr_t)pg, 0, 0);
	}

	UVMHIST_LOG(*histp, " <-- 0 (OK)", 0, 0, 0, 0);
	return 0;
}
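
/*
 * Editor's note (illustrative, per pmap(9)): a typical MI call pair is
 *
 *	error = pmap_enter(pm, va, pa, VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_PROT_READ | PMAP_WIRED);
 *	if (error == 0)
 *		pmap_update(pm);
 *
 * where the VM_PROT_* bits in "flags" encode the access that caused the
 * fault, which is what drives the referenced/modified pre-setting above.
 */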

void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	pmap_t pmap = pmap_kernel();
	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
	struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(va=%#jx pa=%#jx prot=%ju, flags=%#jx)",
	    va, pa, prot, flags);
	PMAP_COUNT(kenter_pa);

	if (mdpg == NULL) {
		PMAP_COUNT(kenter_pa_unmanaged);
		if ((flags & PMAP_CACHE_MASK) == 0)
			flags |= PMAP_NOCACHE;
	} else {
		if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va))
			PMAP_COUNT(kenter_pa_bad);
	}

	pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags);
	kpreempt_disable();
	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
	KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
	    pmap_limits.virtual_end);
	KASSERT(!pte_valid_p(*ptep));

	/*
	 * No need to track non-managed pages or PMAP_KMPAGE pages for aliases.
	 */
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
	if (pg != NULL && (flags & PMAP_KMPAGE) == 0
	    && pmap_md_virtual_cache_aliasing_p()) {
		pmap_enter_pv(pmap, va, pg, &npte, PV_KENTER);
	}
#endif

	/*
	 * We have the option to force this mapping into the TLB but we
	 * don't.  Instead let the next reference to the page do it.
	 */
	pmap_md_tlb_miss_lock_enter();
	pte_set(ptep, npte);
	pmap_tlb_update_addr(pmap_kernel(), va, npte, 0);
	pmap_md_tlb_miss_lock_exit();
	kpreempt_enable();
#if DEBUG > 1
	for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) {
		if (((long *)va)[i] != ((long *)pa)[i])
			panic("%s: contents (%lx) of va %#"PRIxVADDR
			    " != contents (%lx) of pa %#"PRIxPADDR, __func__,
			    ((long *)va)[i], va, ((long *)pa)[i], pa);
	}
#endif

	UVMHIST_LOG(pmaphist, " <-- done (ptep=%#jx)", (uintptr_t)ptep, 0, 0,
	    0);
}
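
/*
 * Editor's note: pmap_kenter_pa()/pmap_kremove() form the unmanaged
 * kernel fast path; a hedged sketch of the usual pairing:
 *
 *	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
 *	...					// use the mapping
 *	pmap_kremove(va, PAGE_SIZE);
 *
 * No reference/modify emulation is done here, and no pv tracking either,
 * except the PV_KENTER alias tracking under PMAP_VIRTUAL_CACHE_ALIASES.
 */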

/*
 *	Remove the given range of addresses from the kernel map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */

static bool
pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
	uintptr_t flags)
{
	const pt_entry_t new_pte = pte_nv_entry(true);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist,
	    "(pmap=%#jx, sva=%#jx eva=%#jx ptep=%#jx)",
	    (uintptr_t)pmap, sva, eva, (uintptr_t)ptep);

	KASSERT(kpreempt_disabled());

	for (; sva < eva; sva += NBPG, ptep++) {
		pt_entry_t pte = *ptep;
		if (!pte_valid_p(pte))
			continue;

		PMAP_COUNT(kremove_pages);
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
		if (pg != NULL && pmap_md_virtual_cache_aliasing_p()) {
			pmap_remove_pv(pmap, sva, pg, !pte_readonly_p(pte));
		}
#endif

		pmap_md_tlb_miss_lock_enter();
		pte_set(ptep, new_pte);
		pmap_tlb_invalidate_addr(pmap, sva);
		pmap_md_tlb_miss_lock_exit();
	}

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);

	return false;
}

void
pmap_kremove(vaddr_t va, vsize_t len)
{
	const vaddr_t sva = trunc_page(va);
	const vaddr_t eva = round_page(va + len);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(va=%#jx len=%#jx)", va, len, 0, 0);

	kpreempt_disable();
	pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

void
pmap_remove_all(struct pmap *pmap)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pm=%#jx)", (uintptr_t)pmap, 0, 0, 0);

	KASSERT(pmap != pmap_kernel());

	kpreempt_disable();
	/*
	 * Free all of our ASIDs which means we can skip doing all the
	 * tlb_invalidate_addrs().
	 */
	pmap_md_tlb_miss_lock_enter();
#ifdef MULTIPROCESSOR
	// This should be the last CPU with this pmap onproc
	KASSERT(!kcpuset_isotherset(pmap->pm_onproc, cpu_index(curcpu())));
	if (kcpuset_isset(pmap->pm_onproc, cpu_index(curcpu())))
#endif
		pmap_tlb_asid_deactivate(pmap);
#ifdef MULTIPROCESSOR
	KASSERT(kcpuset_iszero(pmap->pm_onproc));
#endif
	pmap_tlb_asid_release_all(pmap);
	pmap_md_tlb_miss_lock_exit();
	pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE;

#ifdef PMAP_FAULTINFO
	curpcb->pcb_faultinfo.pfi_faultaddr = 0;
	curpcb->pcb_faultinfo.pfi_repeats = 0;
	curpcb->pcb_faultinfo.pfi_faultpte = NULL;
#endif
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

/*
 *	Routine:	pmap_unwire
 *	Function:	Clear the wired attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_unwire(pmap_t pmap, vaddr_t va)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%#jx, va=%#jx)", (uintptr_t)pmap, va,
	    0, 0);
	PMAP_COUNT(unwire);

	/*
	 * Don't need to flush the TLB since PG_WIRED is only in software.
	 */
	kpreempt_disable();
	pmap_addr_range_check(pmap, va, va, __func__);
	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
	KASSERTMSG(ptep != NULL, "pmap %p va %#"PRIxVADDR" invalid STE",
	    pmap, va);
	pt_entry_t pte = *ptep;
	KASSERTMSG(pte_valid_p(pte),
	    "pmap %p va %#"PRIxVADDR" invalid PTE %#"PRIxPTE" @ %p",
	    pmap, va, pte_value(pte), ptep);

	if (pte_wired_p(pte)) {
		pmap_md_tlb_miss_lock_enter();
		pte_set(ptep, pte_unwire_entry(pte));
		pmap_md_tlb_miss_lock_exit();
		pmap->pm_stats.wired_count--;
	}
#ifdef DIAGNOSTIC
	else {
		printf("%s: wiring for pmap %p va %#"PRIxVADDR" unchanged!\n",
		    __func__, pmap, va);
	}
#endif
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
bool
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	paddr_t pa;

	if (pmap == pmap_kernel()) {
		if (pmap_md_direct_mapped_vaddr_p(va)) {
			pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
			goto done;
		}
		if (pmap_md_io_vaddr_p(va))
			panic("%s: io address %#"PRIxVADDR, __func__, va);

		if (va >= pmap_limits.virtual_end)
			panic("%s: illegal kernel mapped address %#"PRIxVADDR,
			    __func__, va);
	}
	kpreempt_disable();
	const pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
	if (ptep == NULL || !pte_valid_p(*ptep)) {
		kpreempt_enable();
		return false;
	}
	pa = pte_to_paddr(*ptep) | (va & PGOFSET);
	kpreempt_enable();
done:
	if (pap != NULL) {
		*pap = pa;
	}
	return true;
}
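
/*
 * Editor's note (minimal sketch): translating a kernel VA to a physical
 * address, e.g. while setting up device DMA:
 *
 *	paddr_t pa;
 *	if (!pmap_extract(pmap_kernel(), va, &pa))
 *		panic("va %#"PRIxVADDR" not mapped", va);
 *
 * Direct-mapped kernel addresses short-circuit through
 * pmap_md_direct_mapped_vaddr_to_paddr() without touching any PTEs.
 */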
   1564 
   1565 /*
   1566  *	Copy the range specified by src_addr/len
   1567  *	from the source map to the range dst_addr/len
   1568  *	in the destination map.
   1569  *
   1570  *	This routine is only advisory and need not do anything.
   1571  */
   1572 void
   1573 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
   1574     vaddr_t src_addr)
   1575 {
   1576 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1577 	PMAP_COUNT(copy);
   1578 }
   1579 
   1580 /*
   1581  *	pmap_clear_reference:
   1582  *
   1583  *	Clear the reference bit on the specified physical page.
   1584  */
   1585 bool
   1586 pmap_clear_reference(struct vm_page *pg)
   1587 {
   1588 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1589 
   1590 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1591 	UVMHIST_LOG(pmaphist, "(pg=%#jx (pa %#jx))",
   1592 	   (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);
   1593 
   1594 	bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);
   1595 
   1596 	UVMHIST_LOG(pmaphist, " <-- wasref %ju", rv, 0, 0, 0);
   1597 
   1598 	return rv;
   1599 }
   1600 
   1601 /*
   1602  *	pmap_is_referenced:
   1603  *
   1604  *	Return whether or not the specified physical page is referenced
   1605  *	by any physical maps.
   1606  */
   1607 bool
   1608 pmap_is_referenced(struct vm_page *pg)
   1609 {
   1610 	return VM_PAGEMD_REFERENCED_P(VM_PAGE_TO_MD(pg));
   1611 }
   1612 
   1613 /*
   1614  *	Clear the modify bits on the specified physical page.
   1615  */
   1616 bool
   1617 pmap_clear_modify(struct vm_page *pg)
   1618 {
   1619 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1620 	pv_entry_t pv = &mdpg->mdpg_first;
   1621 	pv_entry_t pv_next;
   1622 
   1623 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1624 	UVMHIST_LOG(pmaphist, "(pg=%#jx (%#jx))",
   1625 	    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);
   1626 	PMAP_COUNT(clear_modify);
   1627 
   1628 	if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
   1629 		if (pv->pv_pmap == NULL) {
   1630 			UVMHIST_LOG(pmapexechist,
   1631 			    "pg %#jx (pa %#jx): execpage cleared",
   1632 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
   1633 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1634 			PMAP_COUNT(exec_uncached_clear_modify);
   1635 		} else {
   1636 			UVMHIST_LOG(pmapexechist,
   1637 			    "pg %#jx (pa %#jx): syncicache performed",
   1638 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
   1639 			pmap_page_syncicache(pg);
   1640 			PMAP_COUNT(exec_synced_clear_modify);
   1641 		}
   1642 	}
   1643 	if (!pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED)) {
   1644 		UVMHIST_LOG(pmaphist, " <-- false", 0, 0, 0, 0);
   1645 		return false;
   1646 	}
   1647 	if (pv->pv_pmap == NULL) {
   1648 		UVMHIST_LOG(pmaphist, " <-- true (no mappings)", 0, 0, 0, 0);
   1649 		return true;
   1650 	}
   1651 
    1652 	/*
    1653 	 * Remove write access from all mappings of this page so
    1654 	 * that we can tell when it is written to again.  Flush
    1655 	 * the VAC first if there is one.
    1656 	 */
   1657 	kpreempt_disable();
   1658 	KASSERT(!VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
   1659 	VM_PAGEMD_PVLIST_READLOCK(mdpg);
   1660 	pmap_pvlist_check(mdpg);
   1661 	for (; pv != NULL; pv = pv_next) {
   1662 		pmap_t pmap = pv->pv_pmap;
   1663 		vaddr_t va = trunc_page(pv->pv_va);
   1664 
   1665 		pv_next = pv->pv_next;
   1666 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1667 		if (pv->pv_va & PV_KENTER)
   1668 			continue;
   1669 #endif
   1670 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1671 		KASSERT(ptep);
   1672 		pt_entry_t pte = pte_prot_nowrite(*ptep);
   1673 		if (*ptep == pte) {
   1674 			continue;
   1675 		}
   1676 		KASSERT(pte_valid_p(pte));
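         		/*
         		 * Drop the pv list lock across the PTE update and TLB
         		 * invalidation; the generation number tells us whether
         		 * the list changed while it was unlocked.
         		 */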
   1677 		const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1678 		pmap_md_tlb_miss_lock_enter();
   1679 		pte_set(ptep, pte);
   1680 		pmap_tlb_invalidate_addr(pmap, va);
   1681 		pmap_md_tlb_miss_lock_exit();
   1682 		pmap_update(pmap);
   1683 		if (__predict_false(gen != VM_PAGEMD_PVLIST_READLOCK(mdpg))) {
   1684 			/*
   1685 			 * The list changed!  So restart from the beginning.
   1686 			 */
   1687 			pv_next = &mdpg->mdpg_first;
   1688 			pmap_pvlist_check(mdpg);
   1689 		}
   1690 	}
   1691 	pmap_pvlist_check(mdpg);
   1692 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1693 	kpreempt_enable();
   1694 
   1695 	UVMHIST_LOG(pmaphist, " <-- true (mappings changed)", 0, 0, 0, 0);
   1696 	return true;
   1697 }
   1698 
   1699 /*
   1700  *	pmap_is_modified:
   1701  *
   1702  *	Return whether or not the specified physical page is modified
   1703  *	by any physical maps.
   1704  */
   1705 bool
   1706 pmap_is_modified(struct vm_page *pg)
   1707 {
   1708 	return VM_PAGEMD_MODIFIED_P(VM_PAGE_TO_MD(pg));
   1709 }
   1710 
   1711 /*
   1712  *	pmap_set_modified:
   1713  *
    1714  *	Set the modified and referenced attributes for the specified page.
   1715  */
   1716 void
   1717 pmap_set_modified(paddr_t pa)
   1718 {
   1719 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1720 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1721 	pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
   1722 }
   1723 
   1724 /******************** pv_entry management ********************/
   1725 
   1726 static void
   1727 pmap_pvlist_check(struct vm_page_md *mdpg)
   1728 {
   1729 #ifdef DEBUG
   1730 	pv_entry_t pv = &mdpg->mdpg_first;
   1731 	if (pv->pv_pmap != NULL) {
   1732 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1733 		const u_int colormask = uvmexp.colormask;
   1734 		u_int colors = 0;
   1735 #endif
   1736 		for (; pv != NULL; pv = pv->pv_next) {
    1737 			KASSERT(pv->pv_pmap != pmap_kernel() ||
         			    !pmap_md_direct_mapped_vaddr_p(pv->pv_va));
   1738 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1739 			colors |= __BIT(atop(pv->pv_va) & colormask);
   1740 #endif
   1741 		}
   1742 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
    1743 		// Assert that if more than one color is mapped, the
    1744 		// page is uncached.
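         		// (colors & (colors - 1)) == 0 holds exactly when at
         		// most one bit, i.e. at most one color, is set.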
   1745 		KASSERTMSG(!pmap_md_virtual_cache_aliasing_p()
   1746 		    || colors == 0 || (colors & (colors-1)) == 0
   1747 		    || VM_PAGEMD_UNCACHED_P(mdpg), "colors=%#x uncached=%u",
   1748 		    colors, VM_PAGEMD_UNCACHED_P(mdpg));
   1749 #endif
   1750 	} else {
    1751 		KASSERT(pv->pv_next == NULL);
   1752 	}
   1753 #endif /* DEBUG */
   1754 }
   1755 
   1756 /*
   1757  * Enter the pmap and virtual address into the
   1758  * physical to virtual map table.
   1759  */
   1760 void
   1761 pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, pt_entry_t *nptep,
   1762     u_int flags)
   1763 {
   1764 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1765 	pv_entry_t pv, npv, apv;
   1766 #ifdef UVMHIST
   1767 	bool first = false;
   1768 #endif
   1769 
   1770 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1771 	UVMHIST_LOG(pmaphist,
   1772 	    "(pmap=%#jx va=%#jx pg=%#jx (%#jx)",
   1773 	    (uintptr_t)pmap, va, (uintptr_t)pg, VM_PAGE_TO_PHYS(pg));
   1774 	UVMHIST_LOG(pmaphist, "nptep=%#jx (%#jx))",
   1775 	    (uintptr_t)nptep, pte_value(*nptep), 0, 0);
   1776 
   1777 	KASSERT(kpreempt_disabled());
   1778 	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
   1779 	KASSERTMSG(pmap != pmap_kernel() || !pmap_md_io_vaddr_p(va),
   1780 	    "va %#"PRIxVADDR, va);
   1781 
   1782 	apv = NULL;
   1783 	VM_PAGEMD_PVLIST_LOCK(mdpg);
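         	/*
         	 * We may restart from here if the pv list could have changed
         	 * while the lock was dropped (PV allocation and VCA handling
         	 * below).
         	 */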
   1784 again:
   1785 	pv = &mdpg->mdpg_first;
   1786 	pmap_pvlist_check(mdpg);
   1787 	if (pv->pv_pmap == NULL) {
   1788 		KASSERT(pv->pv_next == NULL);
   1789 		/*
   1790 		 * No entries yet, use header as the first entry
   1791 		 */
   1792 		PMAP_COUNT(primary_mappings);
   1793 		PMAP_COUNT(mappings);
   1794 #ifdef UVMHIST
   1795 		first = true;
   1796 #endif
   1797 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1798 		KASSERT(VM_PAGEMD_CACHED_P(mdpg));
    1799 		// If the new mapping has an incompatible color with the
    1800 		// last mapping of this page, clean the page before using it.
   1801 		if (!PMAP_PAGE_COLOROK_P(va, pv->pv_va)) {
   1802 			pmap_md_vca_clean(pg, PMAP_WBINV);
   1803 		}
   1804 #endif
   1805 		pv->pv_pmap = pmap;
   1806 		pv->pv_va = va | flags;
   1807 	} else {
   1808 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
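         		/* If resolving the alias changed anything, start over. */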
   1809 		if (pmap_md_vca_add(pg, va, nptep)) {
   1810 			goto again;
   1811 		}
   1812 #endif
   1813 
   1814 		/*
   1815 		 * There is at least one other VA mapping this page.
   1816 		 * Place this entry after the header.
   1817 		 *
   1818 		 * Note: the entry may already be in the table if
   1819 		 * we are only changing the protection bits.
   1820 		 */
   1821 
   1822 #ifdef PARANOIADIAG
   1823 		const paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1824 #endif
   1825 		for (npv = pv; npv; npv = npv->pv_next) {
   1826 			if (pmap == npv->pv_pmap
   1827 			    && va == trunc_page(npv->pv_va)) {
   1828 #ifdef PARANOIADIAG
   1829 				pt_entry_t *ptep = pmap_pte_lookup(pmap, va);
   1830 				pt_entry_t pte = (ptep != NULL) ? *ptep : 0;
   1831 				if (!pte_valid_p(pte) || pte_to_paddr(pte) != pa)
   1832 					printf("%s: found va %#"PRIxVADDR
   1833 					    " pa %#"PRIxPADDR
   1834 					    " in pv_table but != %#"PRIxPTE"\n",
   1835 					    __func__, va, pa, pte_value(pte));
   1836 #endif
   1837 				PMAP_COUNT(remappings);
   1838 				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1839 				if (__predict_false(apv != NULL))
   1840 					pmap_pv_free(apv);
   1841 
   1842 				UVMHIST_LOG(pmaphist,
   1843 				    " <-- done pv=%#jx (reused)",
   1844 				    (uintptr_t)pv, 0, 0, 0);
   1845 				return;
   1846 			}
   1847 		}
   1848 		if (__predict_true(apv == NULL)) {
   1849 			/*
    1850 			 * To allocate a PV we have to release the PVLIST
    1851 			 * lock, so record the list generation first.  We then
    1852 			 * allocate the PV and reacquire the lock.
   1853 			 */
   1854 			pmap_pvlist_check(mdpg);
   1855 			const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1856 
   1857 			apv = (pv_entry_t)pmap_pv_alloc();
   1858 			if (apv == NULL)
   1859 				panic("pmap_enter_pv: pmap_pv_alloc() failed");
   1860 
   1861 			/*
   1862 			 * If the generation has changed, then someone else
   1863 			 * tinkered with this page so we should start over.
   1864 			 */
   1865 			if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg))
   1866 				goto again;
   1867 		}
   1868 		npv = apv;
   1869 		apv = NULL;
   1870 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1871 		/*
    1872 		 * If we need to deal with virtual cache aliases, keep mappings
   1873 		 * in the kernel pmap at the head of the list.  This allows
   1874 		 * the VCA code to easily use them for cache operations if
   1875 		 * present.
   1876 		 */
   1877 		pmap_t kpmap = pmap_kernel();
   1878 		if (pmap != kpmap) {
   1879 			while (pv->pv_pmap == kpmap && pv->pv_next != NULL) {
   1880 				pv = pv->pv_next;
   1881 			}
   1882 		}
   1883 #endif
   1884 		npv->pv_va = va | flags;
   1885 		npv->pv_pmap = pmap;
   1886 		npv->pv_next = pv->pv_next;
   1887 		pv->pv_next = npv;
   1888 		PMAP_COUNT(mappings);
   1889 	}
   1890 	pmap_pvlist_check(mdpg);
   1891 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1892 	if (__predict_false(apv != NULL))
   1893 		pmap_pv_free(apv);
   1894 
   1895 	UVMHIST_LOG(pmaphist, " <-- done pv=%#jx (first %ju)", (uintptr_t)pv,
   1896 	    first, 0, 0);
   1897 }
   1898 
   1899 /*
   1900  * Remove a physical to virtual address translation.
   1901  * If cache was inhibited on this page, and there are no more cache
   1902  * conflicts, restore caching.
    1903  * Flush the cache if the last mapping is removed (the page should
    1904  * always be cached at this point).
   1905  */
   1906 void
   1907 pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty)
   1908 {
   1909 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1910 	pv_entry_t pv, npv;
   1911 	bool last;
   1912 
   1913 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1914 	UVMHIST_LOG(pmaphist,
   1915 	    "(pmap=%#jx, va=%#jx, pg=%#jx (pa %#jx)",
   1916 	    (uintptr_t)pmap, va, (uintptr_t)pg, VM_PAGE_TO_PHYS(pg));
   1917 	UVMHIST_LOG(pmaphist, "dirty=%ju)", dirty, 0, 0, 0);
   1918 
   1919 	KASSERT(kpreempt_disabled());
   1920 	KASSERT((va & PAGE_MASK) == 0);
   1921 	pv = &mdpg->mdpg_first;
   1922 
   1923 	VM_PAGEMD_PVLIST_LOCK(mdpg);
   1924 	pmap_pvlist_check(mdpg);
   1925 
   1926 	/*
   1927 	 * If it is the first entry on the list, it is actually
   1928 	 * in the header and we must copy the following entry up
   1929 	 * to the header.  Otherwise we must search the list for
   1930 	 * the entry.  In either case we free the now unused entry.
   1931 	 */
   1932 
   1933 	last = false;
   1934 	if (pmap == pv->pv_pmap && va == trunc_page(pv->pv_va)) {
   1935 		npv = pv->pv_next;
   1936 		if (npv) {
   1937 			*pv = *npv;
   1938 			KASSERT(pv->pv_pmap != NULL);
   1939 		} else {
   1940 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1941 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
   1942 #endif
   1943 			pv->pv_pmap = NULL;
   1944 			last = true;	/* Last mapping removed */
   1945 		}
   1946 		PMAP_COUNT(remove_pvfirst);
   1947 	} else {
   1948 		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
   1949 			PMAP_COUNT(remove_pvsearch);
   1950 			if (pmap == npv->pv_pmap && va == trunc_page(npv->pv_va))
   1951 				break;
   1952 		}
   1953 		if (npv) {
   1954 			pv->pv_next = npv->pv_next;
   1955 		}
   1956 	}
   1957 
   1958 	pmap_pvlist_check(mdpg);
   1959 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1960 
   1961 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1962 	pmap_md_vca_remove(pg, va, dirty, last);
   1963 #endif
   1964 
   1965 	/*
   1966 	 * Free the pv_entry if needed.
   1967 	 */
   1968 	if (npv)
   1969 		pmap_pv_free(npv);
   1970 	if (VM_PAGEMD_EXECPAGE_P(mdpg) && dirty) {
   1971 		if (last) {
   1972 			/*
   1973 			 * If this was the page's last mapping, we no longer
   1974 			 * care about its execness.
   1975 			 */
   1976 			UVMHIST_LOG(pmapexechist,
   1977 			    "pg %#jx (pa %#jx)last %ju: execpage cleared",
   1978 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
   1979 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1980 			PMAP_COUNT(exec_uncached_remove);
   1981 		} else {
   1982 			/*
   1983 			 * Someone still has it mapped as an executable page
   1984 			 * so we must sync it.
   1985 			 */
   1986 			UVMHIST_LOG(pmapexechist,
   1987 			    "pg %#jx (pa %#jx) last %ju: performed syncicache",
   1988 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
   1989 			pmap_page_syncicache(pg);
   1990 			PMAP_COUNT(exec_synced_remove);
   1991 		}
   1992 	}
   1993 
   1994 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1995 }
   1996 
   1997 #if defined(MULTIPROCESSOR)
   1998 struct pmap_pvlist_info {
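         	/*
         	 * Room for one lock per cache line, assuming cache lines
         	 * are at least 32 bytes.
         	 */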
   1999 	kmutex_t *pli_locks[PAGE_SIZE / 32];
   2000 	volatile u_int pli_lock_refs[PAGE_SIZE / 32];
   2001 	volatile u_int pli_lock_index;
   2002 	u_int pli_lock_mask;
   2003 } pmap_pvlist_info;
   2004 
   2005 void
   2006 pmap_pvlist_lock_init(size_t cache_line_size)
   2007 {
   2008 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
   2009 	const vaddr_t lock_page = uvm_pageboot_alloc(PAGE_SIZE);
   2010 	vaddr_t lock_va = lock_page;
   2011 	if (sizeof(kmutex_t) > cache_line_size) {
   2012 		cache_line_size = roundup2(sizeof(kmutex_t), cache_line_size);
   2013 	}
   2014 	const size_t nlocks = PAGE_SIZE / cache_line_size;
   2015 	KASSERT((nlocks & (nlocks - 1)) == 0);
   2016 	/*
   2017 	 * Now divide the page into a number of mutexes, one per cacheline.
   2018 	 */
   2019 	for (size_t i = 0; i < nlocks; lock_va += cache_line_size, i++) {
   2020 		kmutex_t * const lock = (kmutex_t *)lock_va;
   2021 		mutex_init(lock, MUTEX_DEFAULT, IPL_HIGH);
   2022 		pli->pli_locks[i] = lock;
   2023 	}
   2024 	pli->pli_lock_mask = nlocks - 1;
   2025 }
   2026 
   2027 kmutex_t *
   2028 pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
   2029 {
   2030 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
   2031 	kmutex_t *lock = mdpg->mdpg_lock;
   2032 
   2033 	/*
   2034 	 * Allocate a lock on an as-needed basis.  This will hopefully give us
    2035 	 * a semi-random distribution that is not based on page color.
   2036 	 */
   2037 	if (__predict_false(lock == NULL)) {
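         		/*
         		 * Stride by 37: an odd increment is coprime to the
         		 * power-of-two number of locks, so successive
         		 * allocations cycle through every lock.
         		 */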
   2038 		size_t locknum = atomic_add_int_nv(&pli->pli_lock_index, 37);
   2039 		size_t lockid = locknum & pli->pli_lock_mask;
   2040 		kmutex_t * const new_lock = pli->pli_locks[lockid];
   2041 		/*
   2042 		 * Set the lock.  If some other thread already did, just use
   2043 		 * the one they assigned.
   2044 		 */
   2045 		lock = atomic_cas_ptr(&mdpg->mdpg_lock, NULL, new_lock);
   2046 		if (lock == NULL) {
   2047 			lock = new_lock;
   2048 			atomic_inc_uint(&pli->pli_lock_refs[lockid]);
   2049 		}
   2050 	}
   2051 
   2052 	/*
   2053 	 * Now finally provide the lock.
   2054 	 */
   2055 	return lock;
   2056 }
   2057 #else /* !MULTIPROCESSOR */
   2058 void
   2059 pmap_pvlist_lock_init(size_t cache_line_size)
   2060 {
   2061 	mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_HIGH);
   2062 }
   2063 
   2064 #ifdef MODULAR
   2065 kmutex_t *
   2066 pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
   2067 {
   2068 	/*
   2069 	 * We just use a global lock.
   2070 	 */
   2071 	if (__predict_false(mdpg->mdpg_lock == NULL)) {
   2072 		mdpg->mdpg_lock = &pmap_pvlist_mutex;
   2073 	}
   2074 
   2075 	/*
   2076 	 * Now finally provide the lock.
   2077 	 */
   2078 	return mdpg->mdpg_lock;
   2079 }
   2080 #endif /* MODULAR */
   2081 #endif /* !MULTIPROCESSOR */
   2082 
   2083 /*
   2084  * pmap_pv_page_alloc:
   2085  *
   2086  *	Allocate a page for the pv_entry pool.
   2087  */
   2088 void *
   2089 pmap_pv_page_alloc(struct pool *pp, int flags)
   2090 {
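         	/*
         	 * Pool pages are direct-mapped (pmap_pv_page_free relies on
         	 * this), so no kernel virtual address space is consumed.
         	 */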
   2091 	struct vm_page * const pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE);
   2092 	if (pg == NULL)
   2093 		return NULL;
   2094 
   2095 	return (void *)pmap_map_poolpage(VM_PAGE_TO_PHYS(pg));
   2096 }
   2097 
   2098 /*
   2099  * pmap_pv_page_free:
   2100  *
   2101  *	Free a pv_entry pool page.
   2102  */
   2103 void
   2104 pmap_pv_page_free(struct pool *pp, void *v)
   2105 {
   2106 	vaddr_t va = (vaddr_t)v;
   2107 
   2108 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
   2109 	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   2110 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   2111 	KASSERT(pg != NULL);
   2112 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   2113 	kpreempt_disable();
   2114 	pmap_md_vca_remove(pg, va, true, true);
   2115 	kpreempt_enable();
   2116 #endif
   2117 	pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
   2118 	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg)));
   2119 	uvm_pagefree(pg);
   2120 }
   2121 
   2122 #ifdef PMAP_PREFER
   2123 /*
   2124  * Find first virtual address >= *vap that doesn't cause
   2125  * a cache alias conflict.
   2126  */
   2127 void
   2128 pmap_prefer(vaddr_t foff, vaddr_t *vap, vsize_t sz, int td)
   2129 {
   2130 	vsize_t prefer_mask = ptoa(uvmexp.colormask);
   2131 
   2132 	PMAP_COUNT(prefer_requests);
   2133 
   2134 	prefer_mask |= pmap_md_cache_prefer_mask();
   2135 
   2136 	if (prefer_mask) {
   2137 		vaddr_t	va = *vap;
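         		/*
         		 * d is the distance forward from va to the next
         		 * address with the same cache color as foff.
         		 */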
   2138 		vsize_t d = (foff - va) & prefer_mask;
   2139 		if (d) {
   2140 			if (td)
   2141 				*vap = trunc_page(va - ((-d) & prefer_mask));
   2142 			else
   2143 				*vap = round_page(va + d);
   2144 			PMAP_COUNT(prefer_adjustments);
   2145 		}
   2146 	}
   2147 }
   2148 #endif /* PMAP_PREFER */
   2149 
   2150 #ifdef PMAP_MAP_POOLPAGE
   2151 vaddr_t
   2152 pmap_map_poolpage(paddr_t pa)
   2153 {
   2154 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   2155 	KASSERT(pg);
   2156 
   2157 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   2158 	KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
   2159 
   2160 	pmap_page_set_attributes(mdpg, VM_PAGEMD_POOLPAGE);
   2161 
   2162 	return pmap_md_map_poolpage(pa, NBPG);
   2163 }
   2164 
   2165 paddr_t
   2166 pmap_unmap_poolpage(vaddr_t va)
   2167 {
   2168 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
   2169 	paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   2170 
   2171 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   2172 	KASSERT(pg != NULL);
   2173 	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg)));
   2174 
   2175 	pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
   2176 	pmap_md_unmap_poolpage(va, NBPG);
   2177 
   2178 	return pa;
   2179 }
   2180 #endif /* PMAP_MAP_POOLPAGE */
   2181