pmap.c revision 1.58
      1 /*	$NetBSD: pmap.c,v 1.58 2020/12/20 16:38:26 skrll Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center and by Chris G. Demetriou.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 /*
     34  * Copyright (c) 1992, 1993
     35  *	The Regents of the University of California.  All rights reserved.
     36  *
     37  * This code is derived from software contributed to Berkeley by
     38  * the Systems Programming Group of the University of Utah Computer
     39  * Science Department and Ralph Campbell.
     40  *
     41  * Redistribution and use in source and binary forms, with or without
     42  * modification, are permitted provided that the following conditions
     43  * are met:
     44  * 1. Redistributions of source code must retain the above copyright
     45  *    notice, this list of conditions and the following disclaimer.
     46  * 2. Redistributions in binary form must reproduce the above copyright
     47  *    notice, this list of conditions and the following disclaimer in the
     48  *    documentation and/or other materials provided with the distribution.
     49  * 3. Neither the name of the University nor the names of its contributors
     50  *    may be used to endorse or promote products derived from this software
     51  *    without specific prior written permission.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     63  * SUCH DAMAGE.
     64  *
     65  *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
     66  */
     67 
     68 #include <sys/cdefs.h>
     69 
     70 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.58 2020/12/20 16:38:26 skrll Exp $");
     71 
     72 /*
     73  *	Manages physical address maps.
     74  *
     75  *	In addition to hardware address maps, this
     76  *	module is called upon to provide software-use-only
     77  *	maps which may or may not be stored in the same
     78  *	form as hardware maps.  These pseudo-maps are
     79  *	used to store intermediate results from copy
     80  *	operations to and from address spaces.
     81  *
     82  *	Since the information managed by this module is
     83  *	also stored by the logical address mapping module,
     84  *	this module may throw away valid virtual-to-physical
     85  *	mappings at almost any time.  However, invalidations
     86  *	of virtual-to-physical mappings must be done as
     87  *	requested.
     88  *
     89  *	In order to cope with hardware architectures which
      90  *	make virtual-to-physical map invalidations expensive,
      91  *	this module may delay invalidation or protection-reduction
      92  *	operations until they are actually necessary.  This module
      93  *	is given full information as to which processors are
      94  *	currently using which maps, and as to when physical maps
      95  *	must be made correct.
     96  */
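         /*
          * Illustrative sketch (not part of the original sources): a typical
          * machine-independent caller drives this module roughly as follows.
          * The pmap pointer, lwp, va, pa and protection values are assumed
          * for the example.
          *
          *	pmap_t pm = pmap_create();
          *	pmap_activate(l);			// when the lwp runs
          *	(void)pmap_enter(pm, va, pa, VM_PROT_READ, VM_PROT_READ);
          *	pmap_update(pm);			// commit deferred work
          *	pmap_remove(pm, va, va + PAGE_SIZE);
          *	pmap_update(pm);
          *	pmap_destroy(pm);			// drop the last reference
          */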
     97 
     98 #include "opt_modular.h"
     99 #include "opt_multiprocessor.h"
    100 #include "opt_sysv.h"
    101 
    102 #define __PMAP_PRIVATE
    103 
    104 #include <sys/param.h>
    105 
    106 #include <sys/atomic.h>
    107 #include <sys/buf.h>
    108 #include <sys/cpu.h>
    109 #include <sys/mutex.h>
    110 #include <sys/pool.h>
    111 
    112 #include <uvm/uvm.h>
    113 #include <uvm/uvm_physseg.h>
    114 #include <uvm/pmap/pmap_pvt.h>
    115 
    116 #if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \
    117     && !defined(PMAP_NO_PV_UNCACHED)
    118 #error PMAP_VIRTUAL_CACHE_ALIASES with MULTIPROCESSOR requires \
    119  PMAP_NO_PV_UNCACHED to be defined
    120 #endif
    121 
    122 PMAP_COUNTER(remove_kernel_calls, "remove kernel calls");
    123 PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped");
    124 PMAP_COUNTER(remove_user_calls, "remove user calls");
    125 PMAP_COUNTER(remove_user_pages, "user pages unmapped");
    126 PMAP_COUNTER(remove_flushes, "remove cache flushes");
    127 PMAP_COUNTER(remove_tlb_ops, "remove tlb ops");
    128 PMAP_COUNTER(remove_pvfirst, "remove pv first");
    129 PMAP_COUNTER(remove_pvsearch, "remove pv search");
    130 
    131 PMAP_COUNTER(prefer_requests, "prefer requests");
    132 PMAP_COUNTER(prefer_adjustments, "prefer adjustments");
    133 
    134 PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
    135 
    136 PMAP_COUNTER(kenter_pa, "kernel fast mapped pages");
    137 PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)");
    138 PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages");
    139 PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages");
    140 
    141 PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable");
    142 PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable");
    143 
    144 PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)");
    145 PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)");
    146 PMAP_COUNTER(kernel_mappings, "kernel pages mapped");
    147 PMAP_COUNTER(user_mappings, "user pages mapped");
    148 PMAP_COUNTER(user_mappings_changed, "user mapping changed");
    149 PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed");
    150 PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
    151 PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
    152 PMAP_COUNTER(pvtracked_mappings, "pv-tracked unmanaged pages mapped");
    153 PMAP_COUNTER(managed_mappings, "managed pages mapped");
    154 PMAP_COUNTER(mappings, "pages mapped");
    155 PMAP_COUNTER(remappings, "pages remapped");
    156 PMAP_COUNTER(unmappings, "pages unmapped");
    157 PMAP_COUNTER(primary_mappings, "page initial mappings");
    158 PMAP_COUNTER(primary_unmappings, "page final unmappings");
    159 PMAP_COUNTER(tlb_hit, "page mapping");
    160 
    161 PMAP_COUNTER(exec_mappings, "exec pages mapped");
    162 PMAP_COUNTER(exec_synced_mappings, "exec pages synced");
    163 PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)");
    164 PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)");
    165 PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)");
    166 PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)");
    167 PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)");
    168 PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)");
    169 PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)");
    170 PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)");
    171 PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)");
    172 
    173 PMAP_COUNTER(create, "creates");
    174 PMAP_COUNTER(reference, "references");
    175 PMAP_COUNTER(dereference, "dereferences");
    176 PMAP_COUNTER(destroy, "destroyed");
    177 PMAP_COUNTER(activate, "activations");
    178 PMAP_COUNTER(deactivate, "deactivations");
    179 PMAP_COUNTER(update, "updates");
    180 #ifdef MULTIPROCESSOR
    181 PMAP_COUNTER(shootdown_ipis, "shootdown IPIs");
    182 #endif
    183 PMAP_COUNTER(unwire, "unwires");
    184 PMAP_COUNTER(copy, "copies");
    185 PMAP_COUNTER(clear_modify, "clear_modifies");
    186 PMAP_COUNTER(protect, "protects");
    187 PMAP_COUNTER(page_protect, "page_protects");
    188 
    189 #define PMAP_ASID_RESERVED 0
    190 CTASSERT(PMAP_ASID_RESERVED == 0);
    191 
    192 #ifndef PMAP_SEGTAB_ALIGN
    193 #define PMAP_SEGTAB_ALIGN	/* nothing */
    194 #endif
    195 #ifdef _LP64
    196 pmap_segtab_t	pmap_kstart_segtab PMAP_SEGTAB_ALIGN; /* first mid-level segtab for kernel */
    197 #endif
    198 pmap_segtab_t	pmap_kern_segtab PMAP_SEGTAB_ALIGN = { /* top level segtab for kernel */
    199 #ifdef _LP64
    200 	.seg_seg[(VM_MIN_KERNEL_ADDRESS & XSEGOFSET) >> SEGSHIFT] = &pmap_kstart_segtab,
    201 #endif
    202 };
    203 
    204 struct pmap_kernel kernel_pmap_store = {
    205 	.kernel_pmap = {
    206 		.pm_count = 1,
    207 		.pm_segtab = &pmap_kern_segtab,
    208 		.pm_minaddr = VM_MIN_KERNEL_ADDRESS,
    209 		.pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
    210 	},
    211 };
    212 
    213 struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;
    214 
    215 struct pmap_limits pmap_limits = {	/* VA and PA limits */
    216 	.virtual_start = VM_MIN_KERNEL_ADDRESS,
    217 };
    218 
    219 #ifdef UVMHIST
    220 static struct kern_history_ent pmapexechistbuf[10000];
    221 static struct kern_history_ent pmaphistbuf[10000];
    222 static struct kern_history_ent pmapsegtabhistbuf[1000];
    223 UVMHIST_DEFINE(pmapexechist);
    224 UVMHIST_DEFINE(pmaphist);
    225 UVMHIST_DEFINE(pmapsegtabhist);
    226 #endif
    227 
    228 /*
    229  * The pools from which pmap structures and sub-structures are allocated.
    230  */
    231 struct pool pmap_pmap_pool;
    232 struct pool pmap_pv_pool;
    233 
    234 #ifndef PMAP_PV_LOWAT
    235 #define	PMAP_PV_LOWAT	16
    236 #endif
    237 int	pmap_pv_lowat = PMAP_PV_LOWAT;
    238 
    239 bool	pmap_initialized = false;
    240 #define	PMAP_PAGE_COLOROK_P(a, b) \
    241 		((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0)
    242 u_int	pmap_page_colormask;
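         /*
          * Example (illustrative numbers): with 4 KB pages and
          * uvmexp.colormask == 3 (four page colors), pmap_page_colormask
          * becomes ptoa(3) == 0x3000, so PMAP_PAGE_COLOROK_P(pa, va) holds
          * exactly when bits 12-13 of pa and va agree, i.e. when the
          * physical and virtual addresses fall on the same cache color.
          */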
    243 
    244 #define PAGE_IS_MANAGED(pa)	(pmap_initialized && uvm_pageismanaged(pa))
    245 
    246 #define PMAP_IS_ACTIVE(pm)						\
    247 	((pm) == pmap_kernel() || 					\
    248 	 (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)
    249 
    250 /* Forward function declarations */
    251 void pmap_page_remove(struct vm_page_md *);
    252 static void pmap_pvlist_check(struct vm_page_md *);
    253 void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
    254 void pmap_enter_pv(pmap_t, vaddr_t, paddr_t, struct vm_page_md *, pt_entry_t *, u_int);
    255 
    256 /*
    257  * PV table management functions.
    258  */
    259 void	*pmap_pv_page_alloc(struct pool *, int);
    260 void	pmap_pv_page_free(struct pool *, void *);
    261 
    262 struct pool_allocator pmap_pv_page_allocator = {
    263 	pmap_pv_page_alloc, pmap_pv_page_free, 0,
    264 };
    265 
    266 #define	pmap_pv_alloc()		pool_get(&pmap_pv_pool, PR_NOWAIT)
    267 #define	pmap_pv_free(pv)	pool_put(&pmap_pv_pool, (pv))
    268 
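         /*
          * The TLB miss lock serializes pmap updates of the page tables and
          * TLB with any software TLB miss handling that walks the same
          * structures.  It is enabled when the port defines
          * PMAP_MD_NEED_TLB_MISS_LOCK (which also supplies the enter/exit
          * hooks) or when DEBUG is set, in which latter case a generic
          * IPL_HIGH spin mutex is used; otherwise the hooks compile away to
          * nothing.
          */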
    269 #ifndef PMAP_NEED_TLB_MISS_LOCK
    270 
    271 #if defined(PMAP_MD_NEED_TLB_MISS_LOCK) || defined(DEBUG)
    272 #define	PMAP_NEED_TLB_MISS_LOCK
    273 #endif /* PMAP_MD_NEED_TLB_MISS_LOCK || DEBUG */
    274 
    275 #endif /* PMAP_NEED_TLB_MISS_LOCK */
    276 
    277 #ifdef PMAP_NEED_TLB_MISS_LOCK
    278 
    279 #ifdef PMAP_MD_NEED_TLB_MISS_LOCK
    280 #define	pmap_tlb_miss_lock_init()	__nothing /* MD code deals with this */
    281 #define	pmap_tlb_miss_lock_enter()	pmap_md_tlb_miss_lock_enter()
    282 #define	pmap_tlb_miss_lock_exit()	pmap_md_tlb_miss_lock_exit()
    283 #else
    284 kmutex_t pmap_tlb_miss_lock 		__cacheline_aligned;
    285 
    286 static void
    287 pmap_tlb_miss_lock_init(void)
    288 {
    289 	mutex_init(&pmap_tlb_miss_lock, MUTEX_SPIN, IPL_HIGH);
    290 }
    291 
    292 static inline void
    293 pmap_tlb_miss_lock_enter(void)
    294 {
    295 	mutex_spin_enter(&pmap_tlb_miss_lock);
    296 }
    297 
    298 static inline void
    299 pmap_tlb_miss_lock_exit(void)
    300 {
    301 	mutex_spin_exit(&pmap_tlb_miss_lock);
    302 }
    303 #endif /* PMAP_MD_NEED_TLB_MISS_LOCK */
    304 
    305 #else
    306 
    307 #define	pmap_tlb_miss_lock_init()	__nothing
    308 #define	pmap_tlb_miss_lock_enter()	__nothing
    309 #define	pmap_tlb_miss_lock_exit()	__nothing
    310 
    311 #endif /* PMAP_NEED_TLB_MISS_LOCK */
    312 
    313 #ifndef MULTIPROCESSOR
    314 kmutex_t pmap_pvlist_mutex	__cacheline_aligned;
    315 #endif
    316 
    317 /*
    318  * Debug functions.
    319  */
    320 
    321 #ifdef DEBUG
    322 static inline void
    323 pmap_asid_check(pmap_t pm, const char *func)
    324 {
    325 	if (!PMAP_IS_ACTIVE(pm))
    326 		return;
    327 
    328 	struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(curcpu()));
    329 	tlb_asid_t asid = tlb_get_asid();
    330 	if (asid != pai->pai_asid)
    331 		panic("%s: inconsistency for active TLB update: %u <-> %u",
    332 		    func, asid, pai->pai_asid);
    333 }
    334 #endif
    335 
    336 static void
    337 pmap_addr_range_check(pmap_t pmap, vaddr_t sva, vaddr_t eva, const char *func)
    338 {
    339 #ifdef DEBUG
    340 	if (pmap == pmap_kernel()) {
    341 		if (sva < VM_MIN_KERNEL_ADDRESS)
    342 			panic("%s: kva %#"PRIxVADDR" not in range",
    343 			    func, sva);
    344 		if (eva >= pmap_limits.virtual_end)
    345 			panic("%s: kva %#"PRIxVADDR" not in range",
    346 			    func, eva);
    347 	} else {
    348 		if (eva > VM_MAXUSER_ADDRESS)
    349 			panic("%s: uva %#"PRIxVADDR" not in range",
    350 			    func, eva);
    351 		pmap_asid_check(pmap, func);
    352 	}
    353 #endif
    354 }
    355 
    356 /*
    357  * Misc. functions.
    358  */
    359 
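         /*
          * Clear the given attribute bits in the page's mdpg_attrs word.
          * Returns true if any of the requested bits were previously set and
          * false if they were already clear.  On MULTIPROCESSOR kernels the
          * update is performed with an atomic compare-and-swap loop.
          */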
    360 bool
    361 pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes)
    362 {
    363 	volatile unsigned long * const attrp = &mdpg->mdpg_attrs;
    364 #ifdef MULTIPROCESSOR
    365 	for (;;) {
    366 		u_int old_attr = *attrp;
    367 		if ((old_attr & clear_attributes) == 0)
    368 			return false;
    369 		u_int new_attr = old_attr & ~clear_attributes;
    370 		if (old_attr == atomic_cas_ulong(attrp, old_attr, new_attr))
    371 			return true;
    372 	}
    373 #else
    374 	unsigned long old_attr = *attrp;
    375 	if ((old_attr & clear_attributes) == 0)
    376 		return false;
    377 	*attrp &= ~clear_attributes;
    378 	return true;
    379 #endif
    380 }
    381 
    382 void
    383 pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes)
    384 {
    385 #ifdef MULTIPROCESSOR
    386 	atomic_or_ulong(&mdpg->mdpg_attrs, set_attributes);
    387 #else
    388 	mdpg->mdpg_attrs |= set_attributes;
    389 #endif
    390 }
    391 
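         /*
          * Synchronize the instruction cache for a page: walk the page's pv
          * list to collect the set of CPUs that currently have one of its
          * pmaps on-processor (every mapping's pm_onproc set on MP kernels,
          * or just the current CPU on UP kernels) and pass that set to
          * pmap_md_page_syncicache() for the MD cache maintenance.
          */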
    392 static void
    393 pmap_page_syncicache(struct vm_page *pg)
    394 {
    395 	UVMHIST_FUNC(__func__);
    396 	UVMHIST_CALLED(pmaphist);
    397 #ifndef MULTIPROCESSOR
    398 	struct pmap * const curpmap = curlwp->l_proc->p_vmspace->vm_map.pmap;
    399 #endif
    400 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
    401 	pv_entry_t pv = &mdpg->mdpg_first;
    402 	kcpuset_t *onproc;
    403 #ifdef MULTIPROCESSOR
    404 	kcpuset_create(&onproc, true);
    405 	KASSERT(onproc != NULL);
    406 #else
    407 	onproc = NULL;
    408 #endif
    409 	VM_PAGEMD_PVLIST_READLOCK(mdpg);
    410 	pmap_pvlist_check(mdpg);
    411 
    412 	UVMHIST_LOG(pmaphist, "pv %#jx pv_pmap %#jx", (uintptr_t)pv,
    413 	    (uintptr_t)pv->pv_pmap, 0, 0);
    414 
    415 	if (pv->pv_pmap != NULL) {
    416 		for (; pv != NULL; pv = pv->pv_next) {
    417 #ifdef MULTIPROCESSOR
    418 			UVMHIST_LOG(pmaphist, "pv %#jx pv_pmap %#jx",
    419 			    (uintptr_t)pv, (uintptr_t)pv->pv_pmap, 0, 0);
    420 			kcpuset_merge(onproc, pv->pv_pmap->pm_onproc);
    421 			if (kcpuset_match(onproc, kcpuset_running)) {
    422 				break;
    423 			}
    424 #else
    425 			if (pv->pv_pmap == curpmap) {
    426 				onproc = curcpu()->ci_data.cpu_kcpuset;
    427 				break;
    428 			}
    429 #endif
    430 		}
    431 	}
    432 	pmap_pvlist_check(mdpg);
    433 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
    434 	kpreempt_disable();
    435 	pmap_md_page_syncicache(mdpg, onproc);
    436 	kpreempt_enable();
    437 #ifdef MULTIPROCESSOR
    438 	kcpuset_destroy(onproc);
    439 #endif
    440 }
    441 
    442 /*
    443  * Define the initial bounds of the kernel virtual address space.
    444  */
    445 void
    446 pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
    447 {
    448 
    449 	*vstartp = pmap_limits.virtual_start;
    450 	*vendp = pmap_limits.virtual_end;
    451 }
    452 
    453 vaddr_t
    454 pmap_growkernel(vaddr_t maxkvaddr)
    455 {
    456 	vaddr_t virtual_end = pmap_limits.virtual_end;
    457 	maxkvaddr = pmap_round_seg(maxkvaddr) - 1;
    458 
    459 	/*
    460 	 * Reserve PTEs for the new KVA space.
    461 	 */
    462 	for (; virtual_end < maxkvaddr; virtual_end += NBSEG) {
    463 		pmap_pte_reserve(pmap_kernel(), virtual_end, 0);
    464 	}
    465 
    466 	/*
    467 	 * Don't exceed VM_MAX_KERNEL_ADDRESS!
    468 	 */
    469 	if (virtual_end == 0 || virtual_end > VM_MAX_KERNEL_ADDRESS)
    470 		virtual_end = VM_MAX_KERNEL_ADDRESS;
    471 
    472 	/*
    473 	 * Update new end.
    474 	 */
    475 	pmap_limits.virtual_end = virtual_end;
    476 	return virtual_end;
    477 }
    478 
    479 /*
    480  * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
    481  * This function allows for early dynamic memory allocation until the virtual
    482  * memory system has been bootstrapped.  After that point, either kmem_alloc
    483  * or malloc should be used.  This function works by stealing pages from the
    484  * (to be) managed page pool, then implicitly mapping the pages (by using
    485  * their direct mapped addresses) and zeroing them.
    486  *
    487  * It may be used once the physical memory segments have been pre-loaded
    488  * into the vm_physmem[] array.  Early memory allocation MUST use this
    489  * interface!  This cannot be used after vm_page_startup(), and will
    490  * generate a panic if tried.
    491  *
    492  * Note that this memory will never be freed, and in essence it is wired
    493  * down.
    494  *
    495  * We must adjust *vstartp and/or *vendp iff we use address space
    496  * from the kernel virtual address range defined by pmap_virtual_space().
    497  */
    498 vaddr_t
    499 pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
    500 {
    501 	size_t npgs;
    502 	paddr_t pa;
    503 	vaddr_t va;
    504 
    505 	uvm_physseg_t maybe_bank = UVM_PHYSSEG_TYPE_INVALID;
    506 
    507 	size = round_page(size);
    508 	npgs = atop(size);
    509 
    510 	aprint_debug("%s: need %zu pages\n", __func__, npgs);
    511 
    512 	for (uvm_physseg_t bank = uvm_physseg_get_first();
    513 	     uvm_physseg_valid_p(bank);
    514 	     bank = uvm_physseg_get_next(bank)) {
    515 
    516 		if (uvm.page_init_done == true)
    517 			panic("pmap_steal_memory: called _after_ bootstrap");
    518 
    519 		aprint_debug("%s: seg %"PRIxPHYSSEG": %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
    520 		    __func__, bank,
    521 		    uvm_physseg_get_avail_start(bank), uvm_physseg_get_start(bank),
    522 		    uvm_physseg_get_avail_end(bank), uvm_physseg_get_end(bank));
    523 
    524 		if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank)
    525 		    || uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) {
    526 			aprint_debug("%s: seg %"PRIxPHYSSEG": bad start\n", __func__, bank);
    527 			continue;
    528 		}
    529 
    530 		if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < npgs) {
    531 			aprint_debug("%s: seg %"PRIxPHYSSEG": too small for %zu pages\n",
    532 			    __func__, bank, npgs);
    533 			continue;
    534 		}
    535 
    536 		if (!pmap_md_ok_to_steal_p(bank, npgs)) {
    537 			continue;
    538 		}
    539 
    540 		/*
    541 		 * Always try to allocate from the segment with the least
    542 		 * amount of space left.
    543 		 */
    544 #define VM_PHYSMEM_SPACE(b)	((uvm_physseg_get_avail_end(b)) - (uvm_physseg_get_avail_start(b)))
    545 		if (uvm_physseg_valid_p(maybe_bank) == false
    546 		    || VM_PHYSMEM_SPACE(bank) < VM_PHYSMEM_SPACE(maybe_bank)) {
    547 			maybe_bank = bank;
    548 		}
    549 	}
    550 
    551 	if (uvm_physseg_valid_p(maybe_bank)) {
    552 		const uvm_physseg_t bank = maybe_bank;
    553 
    554 		/*
    555 		 * There are enough pages here; steal them!
    556 		 */
    557 		pa = ptoa(uvm_physseg_get_start(bank));
    558 		uvm_physseg_unplug(atop(pa), npgs);
    559 
    560 		aprint_debug("%s: seg %"PRIxPHYSSEG": %zu pages stolen (%#"PRIxPADDR" left)\n",
    561 		    __func__, bank, npgs, VM_PHYSMEM_SPACE(bank));
    562 
    563 		va = pmap_md_map_poolpage(pa, size);
    564 		memset((void *)va, 0, size);
    565 		return va;
    566 	}
    567 
    568 	/*
    569 	 * If we got here, there was no memory left.
    570 	 */
    571 	panic("pmap_steal_memory: no memory to steal %zu pages", npgs);
    572 }
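         /*
          * Illustrative sketch (hypothetical call site): machine-dependent
          * bootstrap code might use this to allocate an early, permanently
          * wired structure before uvm is initialized, e.g.
          *
          *	vaddr_t va = pmap_steal_memory(USPACE, NULL, NULL);
          *
          * The memory comes back zeroed via its direct-mapped address and is
          * never returned to the system.
          */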
    573 
    574 /*
    575  *	Bootstrap the system enough to run with virtual memory.
    576  *	(Common routine called by machine-dependent bootstrap code.)
    577  */
    578 void
    579 pmap_bootstrap_common(void)
    580 {
    581 	pmap_tlb_miss_lock_init();
    582 }
    583 
    584 /*
    585  *	Initialize the pmap module.
    586  *	Called by vm_init, to initialize any structures that the pmap
    587  *	system needs to map virtual memory.
    588  */
    589 void
    590 pmap_init(void)
    591 {
    592 	UVMHIST_INIT_STATIC(pmapexechist, pmapexechistbuf);
    593 	UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);
    594 	UVMHIST_INIT_STATIC(pmapsegtabhist, pmapsegtabhistbuf);
    595 
    596 	UVMHIST_FUNC(__func__);
    597 	UVMHIST_CALLED(pmaphist);
    598 
    599 	/*
    600 	 * Initialize the segtab lock.
    601 	 */
    602 	mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH);
    603 
    604 	/*
    605 	 * Set a low water mark on the pv_entry pool, so that we are
    606 	 * more likely to have these around even in extreme memory
    607 	 * starvation.
    608 	 */
    609 	pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);
    610 
    611 	/*
    612 	 * Set the page colormask but allow pmap_md_init to override it.
    613 	 */
    614 	pmap_page_colormask = ptoa(uvmexp.colormask);
    615 
    616 	pmap_md_init();
    617 
    618 	/*
    619 	 * Now it is safe to enable pv entry recording.
    620 	 */
    621 	pmap_initialized = true;
    622 }
    623 
    624 /*
    625  *	Create and return a physical map.
    626  *
     627  *	This function takes no size argument; it always creates
     628  *	a real pmap that may be referenced by the hardware.
     629  *
     630  *	The new pmap is returned with a reference count of one
     631  *	and covers the user virtual address range from
     632  *	VM_MIN_ADDRESS to VM_MAXUSER_ADDRESS.  Release it with
     633  *	pmap_destroy() when it is no longer needed.
     634  *
    635  */
    636 pmap_t
    637 pmap_create(void)
    638 {
    639 	UVMHIST_FUNC(__func__);
    640 	UVMHIST_CALLED(pmaphist);
    641 	PMAP_COUNT(create);
    642 
    643 	pmap_t pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
    644 	memset(pmap, 0, PMAP_SIZE);
    645 
    646 	KASSERT(pmap->pm_pai[0].pai_link.le_prev == NULL);
    647 
    648 	pmap->pm_count = 1;
    649 	pmap->pm_minaddr = VM_MIN_ADDRESS;
    650 	pmap->pm_maxaddr = VM_MAXUSER_ADDRESS;
    651 
    652 	pmap_segtab_init(pmap);
    653 
    654 #ifdef MULTIPROCESSOR
    655 	kcpuset_create(&pmap->pm_active, true);
    656 	kcpuset_create(&pmap->pm_onproc, true);
    657 	KASSERT(pmap->pm_active != NULL);
    658 	KASSERT(pmap->pm_onproc != NULL);
    659 #endif
    660 
    661 	UVMHIST_LOG(pmaphist, " <-- done (pmap=%#jx)", (uintptr_t)pmap,
    662 	    0, 0, 0);
    663 
    664 	return pmap;
    665 }
    666 
    667 /*
    668  *	Retire the given physical map from service.
    669  *	Should only be called if the map contains
    670  *	no valid mappings.
    671  */
    672 void
    673 pmap_destroy(pmap_t pmap)
    674 {
    675 	UVMHIST_FUNC(__func__);
    676 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
    677 
    678 	if (atomic_dec_uint_nv(&pmap->pm_count) > 0) {
    679 		PMAP_COUNT(dereference);
    680 		UVMHIST_LOG(pmaphist, " <-- done (deref)", 0, 0, 0, 0);
    681 		return;
    682 	}
    683 
    684 	PMAP_COUNT(destroy);
    685 	KASSERT(pmap->pm_count == 0);
    686 	kpreempt_disable();
    687 	pmap_tlb_miss_lock_enter();
    688 	pmap_tlb_asid_release_all(pmap);
    689 	pmap_segtab_destroy(pmap, NULL, 0);
    690 	pmap_tlb_miss_lock_exit();
    691 
    692 #ifdef MULTIPROCESSOR
    693 	kcpuset_destroy(pmap->pm_active);
    694 	kcpuset_destroy(pmap->pm_onproc);
    695 	pmap->pm_active = NULL;
    696 	pmap->pm_onproc = NULL;
    697 #endif
    698 
    699 	pool_put(&pmap_pmap_pool, pmap);
    700 	kpreempt_enable();
    701 
    702 	UVMHIST_LOG(pmaphist, " <-- done (freed)", 0, 0, 0, 0);
    703 }
    704 
    705 /*
    706  *	Add a reference to the specified pmap.
    707  */
    708 void
    709 pmap_reference(pmap_t pmap)
    710 {
    711 	UVMHIST_FUNC(__func__);
    712 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
    713 	PMAP_COUNT(reference);
    714 
    715 	if (pmap != NULL) {
    716 		atomic_inc_uint(&pmap->pm_count);
    717 	}
    718 
    719 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
    720 }
    721 
    722 /*
    723  *	Make a new pmap (vmspace) active for the given process.
    724  */
    725 void
    726 pmap_activate(struct lwp *l)
    727 {
    728 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
    729 
    730 	UVMHIST_FUNC(__func__);
    731 	UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
    732 	    (uintptr_t)pmap, 0, 0);
    733 	PMAP_COUNT(activate);
    734 
    735 	kpreempt_disable();
    736 	pmap_tlb_miss_lock_enter();
    737 	pmap_tlb_asid_acquire(pmap, l);
    738 	pmap_segtab_activate(pmap, l);
    739 	pmap_tlb_miss_lock_exit();
    740 	kpreempt_enable();
    741 
    742 	UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
    743 	    l->l_lid, 0, 0);
    744 }
    745 
    746 /*
    747  * Remove this page from all physical maps in which it resides.
    748  * Reflects back modify bits to the pager.
    749  */
    750 void
    751 pmap_page_remove(struct vm_page_md *mdpg)
    752 {
    753 	kpreempt_disable();
    754 	VM_PAGEMD_PVLIST_LOCK(mdpg);
    755 	pmap_pvlist_check(mdpg);
    756 
    757 	struct vm_page * const pg =
    758 	    VM_PAGEMD_VMPAGE_P(mdpg) ? VM_MD_TO_PAGE(mdpg) : NULL;
    759 
    760 	UVMHIST_FUNC(__func__);
    761 	if (pg) {
    762 		UVMHIST_CALLARGS(pmaphist, "mdpg %#jx pg %#jx (pa %#jx): "
    763 		    "execpage cleared", (uintptr_t)mdpg, (uintptr_t)pg,
    764 		    VM_PAGE_TO_PHYS(pg), 0);
    765 	} else {
    766 		UVMHIST_CALLARGS(pmaphist, "mdpg %#jx", (uintptr_t)mdpg, 0,
    767 		    0, 0);
    768 	}
    769 
    770 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
    771 	pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE|VM_PAGEMD_UNCACHED);
    772 #else
    773 	pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
    774 #endif
    775 	PMAP_COUNT(exec_uncached_remove);
    776 
    777 	pv_entry_t pv = &mdpg->mdpg_first;
    778 	if (pv->pv_pmap == NULL) {
    779 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
    780 		kpreempt_enable();
    781 		UVMHIST_LOG(pmaphist, " <-- done (empty)", 0, 0, 0, 0);
    782 		return;
    783 	}
    784 
    785 	pv_entry_t npv;
    786 	pv_entry_t pvp = NULL;
    787 
    788 	for (; pv != NULL; pv = npv) {
    789 		npv = pv->pv_next;
    790 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
    791 		if (PV_ISKENTER_P(pv)) {
    792 			UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %#jx"
    793 			    " skip", (uintptr_t)pv, (uintptr_t)pv->pv_pmap,
    794 			    pv->pv_va, 0);
    795 
    796 			KASSERT(pv->pv_pmap == pmap_kernel());
    797 
     798 			/* Assume this is the last one; pvp->pv_next is fixed up if more follow */
    799 			pv->pv_next = NULL;
    800 
    801 			/*
    802 			 * pvp is non-null when we already have a PV_KENTER
    803 			 * pv in pvh_first; otherwise we haven't seen a
    804 			 * PV_KENTER pv and we need to copy this one to
    805 			 * pvh_first
    806 			 */
    807 			if (pvp) {
    808 				/*
    809 				 * The previous PV_KENTER pv needs to point to
    810 				 * this PV_KENTER pv
    811 				 */
    812 				pvp->pv_next = pv;
    813 			} else {
    814 				pv_entry_t fpv = &mdpg->mdpg_first;
    815 				*fpv = *pv;
    816 				KASSERT(fpv->pv_pmap == pmap_kernel());
    817 			}
    818 			pvp = pv;
    819 			continue;
    820 		}
    821 #endif
    822 		const pmap_t pmap = pv->pv_pmap;
    823 		vaddr_t va = trunc_page(pv->pv_va);
    824 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
    825 		KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
    826 		    pmap_limits.virtual_end);
    827 		pt_entry_t pte = *ptep;
    828 		UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %#jx"
    829 		    " pte %#jx", (uintptr_t)pv, (uintptr_t)pmap, va,
    830 		    pte_value(pte));
    831 		if (!pte_valid_p(pte))
    832 			continue;
    833 		const bool is_kernel_pmap_p = (pmap == pmap_kernel());
    834 		if (is_kernel_pmap_p) {
    835 			PMAP_COUNT(remove_kernel_pages);
    836 		} else {
    837 			PMAP_COUNT(remove_user_pages);
    838 		}
    839 		if (pte_wired_p(pte))
    840 			pmap->pm_stats.wired_count--;
    841 		pmap->pm_stats.resident_count--;
    842 
    843 		pmap_tlb_miss_lock_enter();
    844 		const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
    845 		pte_set(ptep, npte);
    846 		if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
    847 			/*
    848 			 * Flush the TLB for the given address.
    849 			 */
    850 			pmap_tlb_invalidate_addr(pmap, va);
    851 		}
    852 		pmap_tlb_miss_lock_exit();
    853 
    854 		/*
     855 		 * pvp being non-NULL means this pv is not the embedded
     856 		 * pvh_first entry, so it can be freed.
    857 		 */
    858 		if (pvp) {
    859 			KASSERT(pvp->pv_pmap == pmap_kernel());
    860 			KASSERT(pvp->pv_next == NULL);
    861 			pmap_pv_free(pv);
    862 		} else {
    863 			pv->pv_pmap = NULL;
    864 			pv->pv_next = NULL;
    865 		}
    866 	}
    867 
    868 	pmap_pvlist_check(mdpg);
    869 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
    870 	kpreempt_enable();
    871 
    872 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
    873 }
    874 
    875 #ifdef __HAVE_PMAP_PV_TRACK
    876 /*
    877  * pmap_pv_protect: change protection of an unmanaged pv-tracked page from
    878  * all pmaps that map it
    879  */
    880 void
    881 pmap_pv_protect(paddr_t pa, vm_prot_t prot)
    882 {
    883 
    884 	/* the only case is remove at the moment */
    885 	KASSERT(prot == VM_PROT_NONE);
    886 	struct pmap_page *pp;
    887 
    888 	pp = pmap_pv_tracked(pa);
    889 	if (pp == NULL)
    890 		panic("pmap_pv_protect: page not pv-tracked: 0x%"PRIxPADDR,
    891 		    pa);
    892 
    893 	struct vm_page_md *mdpg = PMAP_PAGE_TO_MD(pp);
    894 	pmap_page_remove(mdpg);
    895 }
    896 #endif
    897 
    898 /*
    899  *	Make a previously active pmap (vmspace) inactive.
    900  */
    901 void
    902 pmap_deactivate(struct lwp *l)
    903 {
    904 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
    905 
    906 	UVMHIST_FUNC(__func__);
    907 	UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
    908 	    (uintptr_t)pmap, 0, 0);
    909 	PMAP_COUNT(deactivate);
    910 
    911 	kpreempt_disable();
    912 	KASSERT(l == curlwp || l->l_cpu == curlwp->l_cpu);
    913 	pmap_tlb_miss_lock_enter();
    914 	pmap_tlb_asid_deactivate(pmap);
    915 	pmap_segtab_deactivate(pmap);
    916 	pmap_tlb_miss_lock_exit();
    917 	kpreempt_enable();
    918 
    919 	UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
    920 	    l->l_lid, 0, 0);
    921 }
    922 
    923 void
    924 pmap_update(struct pmap *pmap)
    925 {
    926 	UVMHIST_FUNC(__func__);
    927 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
    928 	PMAP_COUNT(update);
    929 
    930 	kpreempt_disable();
    931 #if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
    932 	u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0);
    933 	if (pending && pmap_tlb_shootdown_bystanders(pmap))
    934 		PMAP_COUNT(shootdown_ipis);
    935 #endif
    936 	pmap_tlb_miss_lock_enter();
    937 #if defined(DEBUG) && !defined(MULTIPROCESSOR)
    938 	pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
    939 #endif /* DEBUG */
    940 
    941 	/*
    942 	 * If pmap_remove_all was called, we deactivated ourselves and nuked
    943 	 * our ASID.  Now we have to reactivate ourselves.
    944 	 */
    945 	if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) {
    946 		pmap->pm_flags ^= PMAP_DEFERRED_ACTIVATE;
    947 		pmap_tlb_asid_acquire(pmap, curlwp);
    948 		pmap_segtab_activate(pmap, curlwp);
    949 	}
    950 	pmap_tlb_miss_lock_exit();
    951 	kpreempt_enable();
    952 
    953 	UVMHIST_LOG(pmaphist, " <-- done (kernel=%jx)",
    954 		    (pmap == pmap_kernel() ? 1 : 0), 0, 0, 0);
    955 }
    956 
    957 /*
    958  *	Remove the given range of addresses from the specified map.
    959  *
    960  *	It is assumed that the start and end are properly
    961  *	rounded to the page size.
    962  */
    963 
    964 static bool
    965 pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
    966 	uintptr_t flags)
    967 {
    968 	const pt_entry_t npte = flags;
    969 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
    970 
    971 	UVMHIST_FUNC(__func__);
    972 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jx va=%#jx..%#jx)",
    973 	    (uintptr_t)pmap, (pmap == pmap_kernel() ? 1 : 0), sva, eva);
    974 	UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)",
    975 	    (uintptr_t)ptep, flags, 0, 0);
    976 
    977 	KASSERT(kpreempt_disabled());
    978 
    979 	for (; sva < eva; sva += NBPG, ptep++) {
    980 		const pt_entry_t pte = *ptep;
    981 		if (!pte_valid_p(pte))
    982 			continue;
    983 		if (is_kernel_pmap_p) {
    984 			PMAP_COUNT(remove_kernel_pages);
    985 		} else {
    986 			PMAP_COUNT(remove_user_pages);
    987 		}
    988 		if (pte_wired_p(pte))
    989 			pmap->pm_stats.wired_count--;
    990 		pmap->pm_stats.resident_count--;
    991 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
    992 		if (__predict_true(pg != NULL)) {
    993 			pmap_remove_pv(pmap, sva, pg, pte_modified_p(pte));
    994 		}
    995 		pmap_tlb_miss_lock_enter();
    996 		pte_set(ptep, npte);
    997 		if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
    998 
    999 			/*
   1000 			 * Flush the TLB for the given address.
   1001 			 */
   1002 			pmap_tlb_invalidate_addr(pmap, sva);
   1003 		}
   1004 		pmap_tlb_miss_lock_exit();
   1005 	}
   1006 
   1007 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1008 
   1009 	return false;
   1010 }
   1011 
   1012 void
   1013 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
   1014 {
   1015 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
   1016 	const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
   1017 
   1018 	UVMHIST_FUNC(__func__);
   1019 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx)",
   1020 	    (uintptr_t)pmap, sva, eva, 0);
   1021 
   1022 	if (is_kernel_pmap_p) {
   1023 		PMAP_COUNT(remove_kernel_calls);
   1024 	} else {
   1025 		PMAP_COUNT(remove_user_calls);
   1026 	}
   1027 #ifdef PMAP_FAULTINFO
   1028 	curpcb->pcb_faultinfo.pfi_faultaddr = 0;
   1029 	curpcb->pcb_faultinfo.pfi_repeats = 0;
   1030 	curpcb->pcb_faultinfo.pfi_faultpte = NULL;
   1031 #endif
   1032 	kpreempt_disable();
   1033 	pmap_addr_range_check(pmap, sva, eva, __func__);
   1034 	pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte);
   1035 	kpreempt_enable();
   1036 
   1037 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1038 }
   1039 
   1040 /*
   1041  *	pmap_page_protect:
   1042  *
   1043  *	Lower the permission for all mappings to a given page.
   1044  */
   1045 void
   1046 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   1047 {
   1048 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1049 	pv_entry_t pv;
   1050 	vaddr_t va;
   1051 
   1052 	UVMHIST_FUNC(__func__);
   1053 	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx) prot=%#jx)",
   1054 	    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), prot, 0);
   1055 	PMAP_COUNT(page_protect);
   1056 
   1057 	switch (prot) {
   1058 	case VM_PROT_READ|VM_PROT_WRITE:
   1059 	case VM_PROT_ALL:
   1060 		break;
   1061 
   1062 	/* copy_on_write */
   1063 	case VM_PROT_READ:
   1064 	case VM_PROT_READ|VM_PROT_EXECUTE:
   1065 		pv = &mdpg->mdpg_first;
   1066 		kpreempt_disable();
   1067 		VM_PAGEMD_PVLIST_READLOCK(mdpg);
   1068 		pmap_pvlist_check(mdpg);
   1069 		/*
   1070 		 * Loop over all current mappings setting/clearing as
   1071 		 * appropriate.
   1072 		 */
   1073 		if (pv->pv_pmap != NULL) {
   1074 			while (pv != NULL) {
   1075 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1076 				if (PV_ISKENTER_P(pv)) {
   1077 					pv = pv->pv_next;
   1078 					continue;
   1079 				}
   1080 #endif
   1081 				const pmap_t pmap = pv->pv_pmap;
   1082 				va = trunc_page(pv->pv_va);
   1083 				const uintptr_t gen =
   1084 				    VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1085 				pmap_protect(pmap, va, va + PAGE_SIZE, prot);
   1086 				KASSERT(pv->pv_pmap == pmap);
   1087 				pmap_update(pmap);
   1088 				if (gen != VM_PAGEMD_PVLIST_READLOCK(mdpg)) {
   1089 					pv = &mdpg->mdpg_first;
   1090 				} else {
   1091 					pv = pv->pv_next;
   1092 				}
   1093 				pmap_pvlist_check(mdpg);
   1094 			}
   1095 		}
   1096 		pmap_pvlist_check(mdpg);
   1097 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1098 		kpreempt_enable();
   1099 		break;
   1100 
   1101 	/* remove_all */
   1102 	default:
   1103 		pmap_page_remove(mdpg);
   1104 	}
   1105 
   1106 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1107 }
   1108 
   1109 static bool
   1110 pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
   1111 	uintptr_t flags)
   1112 {
   1113 	const vm_prot_t prot = (flags & VM_PROT_ALL);
   1114 
   1115 	UVMHIST_FUNC(__func__);
   1116 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jx va=%#jx..%#jx)",
   1117 	    (uintptr_t)pmap, (pmap == pmap_kernel() ? 1 : 0), sva, eva);
   1118 	UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)",
   1119 	    (uintptr_t)ptep, flags, 0, 0);
   1120 
   1121 	KASSERT(kpreempt_disabled());
   1122 	/*
   1123 	 * Change protection on every valid mapping within this segment.
   1124 	 */
   1125 	for (; sva < eva; sva += NBPG, ptep++) {
   1126 		pt_entry_t pte = *ptep;
   1127 		if (!pte_valid_p(pte))
   1128 			continue;
   1129 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
   1130 		if (pg != NULL && pte_modified_p(pte)) {
   1131 			struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1132 			if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
   1133 				KASSERT(!VM_PAGEMD_PVLIST_EMPTY_P(mdpg));
   1134 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1135 				if (VM_PAGEMD_CACHED_P(mdpg)) {
   1136 #endif
   1137 					UVMHIST_LOG(pmapexechist,
   1138 					    "pg %#jx (pa %#jx): "
   1139 					    "syncicached performed",
   1140 					    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg),
   1141 					    0, 0);
   1142 					pmap_page_syncicache(pg);
   1143 					PMAP_COUNT(exec_synced_protect);
   1144 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1145 				}
   1146 #endif
   1147 			}
   1148 		}
   1149 		pte = pte_prot_downgrade(pte, prot);
   1150 		if (*ptep != pte) {
   1151 			pmap_tlb_miss_lock_enter();
   1152 			pte_set(ptep, pte);
   1153 			/*
   1154 			 * Update the TLB if needed.
   1155 			 */
   1156 			pmap_tlb_update_addr(pmap, sva, pte, PMAP_TLB_NEED_IPI);
   1157 			pmap_tlb_miss_lock_exit();
   1158 		}
   1159 	}
   1160 
   1161 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1162 
   1163 	return false;
   1164 }
   1165 
   1166 /*
   1167  *	Set the physical protection on the
   1168  *	specified range of this map as requested.
   1169  */
   1170 void
   1171 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
   1172 {
   1173 	UVMHIST_FUNC(__func__);
   1174 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx, prot=%ju)",
   1175 	    (uintptr_t)pmap, sva, eva, prot);
   1176 	PMAP_COUNT(protect);
   1177 
   1178 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
   1179 		pmap_remove(pmap, sva, eva);
   1180 		UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1181 		return;
   1182 	}
   1183 
   1184 	/*
   1185 	 * Change protection on every valid mapping within this segment.
   1186 	 */
   1187 	kpreempt_disable();
   1188 	pmap_addr_range_check(pmap, sva, eva, __func__);
   1189 	pmap_pte_process(pmap, sva, eva, pmap_pte_protect, prot);
   1190 	kpreempt_enable();
   1191 
   1192 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1193 }
   1194 
   1195 #if defined(PMAP_VIRTUAL_CACHE_ALIASES) && !defined(PMAP_NO_PV_UNCACHED)
   1196 /*
   1197  *	pmap_page_cache:
   1198  *
   1199  *	Change all mappings of a managed page to cached/uncached.
   1200  */
   1201 void
   1202 pmap_page_cache(struct vm_page_md *mdpg, bool cached)
   1203 {
   1204 #ifdef UVMHIST
   1205 	const bool vmpage_p = VM_PAGEMD_VMPAGE_P(mdpg);
   1206 	struct vm_page * const pg = vmpage_p ? VM_MD_TO_PAGE(mdpg) : NULL;
   1207 #endif
   1208 
   1209 	UVMHIST_FUNC(__func__);
   1210 	UVMHIST_CALLARGS(pmaphist, "(mdpg=%#jx (pa %#jx) cached=%jd vmpage %jd)",
   1211 	    (uintptr_t)mdpg, pg ? VM_PAGE_TO_PHYS(pg) : 0, cached, vmpage_p);
   1212 
   1213 	KASSERT(kpreempt_disabled());
   1214 	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
   1215 
   1216 	if (cached) {
   1217 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
   1218 		PMAP_COUNT(page_cache_restorations);
   1219 	} else {
   1220 		pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED);
   1221 		PMAP_COUNT(page_cache_evictions);
   1222 	}
   1223 
   1224 	for (pv_entry_t pv = &mdpg->mdpg_first; pv != NULL; pv = pv->pv_next) {
   1225 		pmap_t pmap = pv->pv_pmap;
   1226 		vaddr_t va = trunc_page(pv->pv_va);
   1227 
   1228 		KASSERT(pmap != NULL);
   1229 		KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
   1230 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1231 		if (ptep == NULL)
   1232 			continue;
   1233 		pt_entry_t pte = *ptep;
   1234 		if (pte_valid_p(pte)) {
   1235 			pte = pte_cached_change(pte, cached);
   1236 			pmap_tlb_miss_lock_enter();
   1237 			pte_set(ptep, pte);
   1238 			pmap_tlb_update_addr(pmap, va, pte, PMAP_TLB_NEED_IPI);
   1239 			pmap_tlb_miss_lock_exit();
   1240 		}
   1241 	}
   1242 
   1243 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1244 }
   1245 #endif	/* PMAP_VIRTUAL_CACHE_ALIASES && !PMAP_NO_PV_UNCACHED */
   1246 
   1247 /*
    1248  *	Insert the given physical page (pa) at
    1249  *	the specified virtual address (va) in the
    1250  *	target physical map with the protection requested.
    1251  *
    1252  *	If specified, the page will be wired down, meaning
    1253  *	that the related pte cannot be reclaimed.
   1254  *
   1255  *	NB:  This is the only routine which MAY NOT lazy-evaluate
   1256  *	or lose information.  That is, this routine must actually
   1257  *	insert this page into the given map NOW.
   1258  */
   1259 int
   1260 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1261 {
   1262 	const bool wired = (flags & PMAP_WIRED) != 0;
   1263 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
   1264 	u_int update_flags = (flags & VM_PROT_ALL) != 0 ? PMAP_TLB_INSERT : 0;
   1265 #ifdef UVMHIST
   1266 	struct kern_history * const histp =
   1267 	    ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist);
   1268 #endif
   1269 
   1270 	UVMHIST_FUNC(__func__);
   1271 	UVMHIST_CALLARGS(*histp, "(pmap=%#jx, va=%#jx, pa=%#jx",
   1272 	    (uintptr_t)pmap, va, pa, 0);
   1273 	UVMHIST_LOG(*histp, "prot=%#jx flags=%#jx)", prot, flags, 0, 0);
   1274 
   1275 	const bool good_color = PMAP_PAGE_COLOROK_P(pa, va);
   1276 	if (is_kernel_pmap_p) {
   1277 		PMAP_COUNT(kernel_mappings);
   1278 		if (!good_color)
   1279 			PMAP_COUNT(kernel_mappings_bad);
   1280 	} else {
   1281 		PMAP_COUNT(user_mappings);
   1282 		if (!good_color)
   1283 			PMAP_COUNT(user_mappings_bad);
   1284 	}
   1285 	pmap_addr_range_check(pmap, va, va, __func__);
   1286 
   1287 	KASSERTMSG(prot & VM_PROT_READ, "no READ (%#x) in prot %#x",
   1288 	    VM_PROT_READ, prot);
   1289 
   1290 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1291 	struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
   1292 
   1293 	struct vm_page_md *mdpp = NULL;
   1294 #ifdef __HAVE_PMAP_PV_TRACK
   1295 	struct pmap_page *pp = pmap_pv_tracked(pa);
   1296 	mdpp = pp ? PMAP_PAGE_TO_MD(pp) : NULL;
   1297 #endif
   1298 
   1299 	if (mdpg) {
   1300 		/* Set page referenced/modified status based on flags */
   1301 		if (flags & VM_PROT_WRITE) {
   1302 			pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
   1303 		} else if (flags & VM_PROT_ALL) {
   1304 			pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
   1305 		}
   1306 
   1307 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1308 		if (!VM_PAGEMD_CACHED_P(mdpg)) {
   1309 			flags |= PMAP_NOCACHE;
   1310 			PMAP_COUNT(uncached_mappings);
   1311 		}
   1312 #endif
   1313 
   1314 		PMAP_COUNT(managed_mappings);
   1315 	} else if (mdpp) {
   1316 #ifdef __HAVE_PMAP_PV_TRACK
    1317 		pmap_page_set_attributes(mdpp, VM_PAGEMD_REFERENCED);
   1318 
   1319 		PMAP_COUNT(pvtracked_mappings);
   1320 #endif
   1321 	} else {
   1322 		/*
   1323 		 * Assumption: if it is not part of our managed memory
   1324 		 * then it must be device memory which may be volatile.
   1325 		 */
   1326 		if ((flags & PMAP_CACHE_MASK) == 0)
   1327 			flags |= PMAP_NOCACHE;
   1328 		PMAP_COUNT(unmanaged_mappings);
   1329 	}
   1330 
   1331 	KASSERTMSG(mdpg == NULL || mdpp == NULL, "mdpg %p mdpp %p", mdpg, mdpp);
   1332 
   1333 	struct vm_page_md *md = (mdpg != NULL) ? mdpg : mdpp;
   1334 	pt_entry_t npte = pte_make_enter(pa, md, prot, flags,
   1335 	    is_kernel_pmap_p);
   1336 
   1337 	kpreempt_disable();
   1338 
   1339 	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags);
   1340 	if (__predict_false(ptep == NULL)) {
   1341 		kpreempt_enable();
   1342 		UVMHIST_LOG(*histp, " <-- ENOMEM", 0, 0, 0, 0);
   1343 		return ENOMEM;
   1344 	}
   1345 	const pt_entry_t opte = *ptep;
   1346 	const bool resident = pte_valid_p(opte);
   1347 	bool remap = false;
   1348 	if (resident) {
   1349 		if (pte_to_paddr(opte) != pa) {
   1350 			KASSERT(!is_kernel_pmap_p);
   1351 		    	const pt_entry_t rpte = pte_nv_entry(false);
   1352 
   1353 			pmap_addr_range_check(pmap, va, va + NBPG, __func__);
   1354 			pmap_pte_process(pmap, va, va + NBPG, pmap_pte_remove,
   1355 			    rpte);
   1356 			PMAP_COUNT(user_mappings_changed);
   1357 			remap = true;
   1358 		}
   1359 		update_flags |= PMAP_TLB_NEED_IPI;
   1360 	}
   1361 
   1362 	if (!resident || remap) {
   1363 		pmap->pm_stats.resident_count++;
   1364 	}
   1365 
   1366 	/* Done after case that may sleep/return. */
   1367 	if (md)
   1368 		pmap_enter_pv(pmap, va, pa, md, &npte, 0);
   1369 
   1370 	/*
   1371 	 * Now validate mapping with desired protection/wiring.
   1372 	 */
   1373 	if (wired) {
   1374 		pmap->pm_stats.wired_count++;
   1375 		npte = pte_wire_entry(npte);
   1376 	}
   1377 
   1378 	UVMHIST_LOG(*histp, "new pte %#jx (pa %#jx)",
   1379 	    pte_value(npte), pa, 0, 0);
   1380 
   1381 	KASSERT(pte_valid_p(npte));
   1382 
   1383 	pmap_tlb_miss_lock_enter();
   1384 	pte_set(ptep, npte);
   1385 	pmap_tlb_update_addr(pmap, va, npte, update_flags);
   1386 	pmap_tlb_miss_lock_exit();
   1387 	kpreempt_enable();
   1388 
   1389 	if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) {
   1390 		KASSERT(mdpg != NULL);
   1391 		PMAP_COUNT(exec_mappings);
   1392 		if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) {
   1393 			if (!pte_deferred_exec_p(npte)) {
   1394 				UVMHIST_LOG(*histp, "va=%#jx pg %#jx: "
   1395 				    "immediate syncicache",
   1396 				    va, (uintptr_t)pg, 0, 0);
   1397 				pmap_page_syncicache(pg);
   1398 				pmap_page_set_attributes(mdpg,
   1399 				    VM_PAGEMD_EXECPAGE);
   1400 				PMAP_COUNT(exec_synced_mappings);
   1401 			} else {
   1402 				UVMHIST_LOG(*histp, "va=%#jx pg %#jx: defer "
   1403 				    "syncicache: pte %#jx",
   1404 				    va, (uintptr_t)pg, npte, 0);
   1405 			}
   1406 		} else {
   1407 			UVMHIST_LOG(*histp,
   1408 			    "va=%#jx pg %#jx: no syncicache cached %jd",
   1409 			    va, (uintptr_t)pg, pte_cached_p(npte), 0);
   1410 		}
   1411 	} else if (pg != NULL && (prot & VM_PROT_EXECUTE)) {
   1412 		KASSERT(mdpg != NULL);
   1413 		KASSERT(prot & VM_PROT_WRITE);
   1414 		PMAP_COUNT(exec_mappings);
   1415 		pmap_page_syncicache(pg);
   1416 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1417 		UVMHIST_LOG(*histp,
   1418 		    "va=%#jx pg %#jx: immediate syncicache (writeable)",
   1419 		    va, (uintptr_t)pg, 0, 0);
   1420 	}
   1421 
   1422 	UVMHIST_LOG(*histp, " <-- 0 (OK)", 0, 0, 0, 0);
   1423 	return 0;
   1424 }
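         /*
          * Illustrative sketch (hypothetical pmap, va and page): a caller
          * enters a mapping and then commits any deferred TLB work with
          * pmap_update() once it has finished modifying the pmap.
          *
          *	error = pmap_enter(pm, va, VM_PAGE_TO_PHYS(pg),
          *	    VM_PROT_READ | VM_PROT_WRITE,
          *	    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
          *	...
          *	pmap_update(pm);
          */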
   1425 
   1426 void
   1427 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1428 {
   1429 	pmap_t pmap = pmap_kernel();
   1430 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1431 	struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
   1432 
   1433 	UVMHIST_FUNC(__func__);
   1434 	UVMHIST_CALLARGS(pmaphist, "(va=%#jx pa=%#jx prot=%ju, flags=%#jx)",
   1435 	    va, pa, prot, flags);
   1436 	PMAP_COUNT(kenter_pa);
   1437 
   1438 	if (mdpg == NULL) {
   1439 		PMAP_COUNT(kenter_pa_unmanaged);
   1440 		if ((flags & PMAP_CACHE_MASK) == 0)
   1441 			flags |= PMAP_NOCACHE;
   1442 	} else {
   1443 		if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va))
   1444 			PMAP_COUNT(kenter_pa_bad);
   1445 	}
   1446 
   1447 	pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags);
   1448 	kpreempt_disable();
   1449 	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1450 	KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
   1451 	    pmap_limits.virtual_end);
   1452 	KASSERT(!pte_valid_p(*ptep));
   1453 
   1454 	/*
    1455 	 * No need to track non-managed pages or PMAP_KMPAGE pages for aliases
   1456 	 */
   1457 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1458 	if (pg != NULL && (flags & PMAP_KMPAGE) == 0
   1459 	    && pmap_md_virtual_cache_aliasing_p()) {
   1460 		pmap_enter_pv(pmap, va, pa, mdpg, &npte, PV_KENTER);
   1461 	}
   1462 #endif
   1463 
   1464 	/*
   1465 	 * We have the option to force this mapping into the TLB but we
   1466 	 * don't.  Instead let the next reference to the page do it.
   1467 	 */
   1468 	pmap_tlb_miss_lock_enter();
   1469 	pte_set(ptep, npte);
   1470 	pmap_tlb_update_addr(pmap_kernel(), va, npte, 0);
   1471 	pmap_tlb_miss_lock_exit();
   1472 	kpreempt_enable();
   1473 #if DEBUG > 1
   1474 	for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) {
   1475 		if (((long *)va)[i] != ((long *)pa)[i])
   1476 			panic("%s: contents (%lx) of va %#"PRIxVADDR
   1477 			    " != contents (%lx) of pa %#"PRIxPADDR, __func__,
   1478 			    ((long *)va)[i], va, ((long *)pa)[i], pa);
   1479 	}
   1480 #endif
   1481 
   1482 	UVMHIST_LOG(pmaphist, " <-- done (ptep=%#jx)", (uintptr_t)ptep, 0, 0,
   1483 	    0);
   1484 }
   1485 
   1486 /*
   1487  *	Remove the given range of addresses from the kernel map.
   1488  *
   1489  *	It is assumed that the start and end are properly
   1490  *	rounded to the page size.
   1491  */
   1492 
   1493 static bool
   1494 pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
   1495 	uintptr_t flags)
   1496 {
   1497 	const pt_entry_t new_pte = pte_nv_entry(true);
   1498 
   1499 	UVMHIST_FUNC(__func__);
   1500 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, sva=%#jx eva=%#jx ptep=%#jx)",
   1501 	    (uintptr_t)pmap, sva, eva, (uintptr_t)ptep);
   1502 
   1503 	KASSERT(kpreempt_disabled());
   1504 
   1505 	for (; sva < eva; sva += NBPG, ptep++) {
   1506 		pt_entry_t pte = *ptep;
   1507 		if (!pte_valid_p(pte))
   1508 			continue;
   1509 
   1510 		PMAP_COUNT(kremove_pages);
   1511 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1512 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
   1513 		if (pg != NULL && pmap_md_virtual_cache_aliasing_p()) {
   1514 			pmap_remove_pv(pmap, sva, pg, !pte_readonly_p(pte));
   1515 		}
   1516 #endif
   1517 
   1518 		pmap_tlb_miss_lock_enter();
   1519 		pte_set(ptep, new_pte);
   1520 		pmap_tlb_invalidate_addr(pmap, sva);
   1521 		pmap_tlb_miss_lock_exit();
   1522 	}
   1523 
   1524 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1525 
   1526 	return false;
   1527 }
   1528 
   1529 void
   1530 pmap_kremove(vaddr_t va, vsize_t len)
   1531 {
   1532 	const vaddr_t sva = trunc_page(va);
   1533 	const vaddr_t eva = round_page(va + len);
   1534 
   1535 	UVMHIST_FUNC(__func__);
   1536 	UVMHIST_CALLARGS(pmaphist, "(va=%#jx len=%#jx)", va, len, 0, 0);
   1537 
   1538 	kpreempt_disable();
   1539 	pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0);
   1540 	kpreempt_enable();
   1541 
   1542 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1543 }
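         /*
          * Illustrative sketch (hypothetical va/pa): pmap_kenter_pa() and
          * pmap_kremove() pair up for unmanaged kernel mappings, e.g. an
          * uncached mapping of a device page:
          *
          *	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE,
          *	    PMAP_NOCACHE);
          *	pmap_update(pmap_kernel());
          *	... access the page through (void *)va ...
          *	pmap_kremove(va, PAGE_SIZE);
          *	pmap_update(pmap_kernel());
          */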
   1544 
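         /*
          * Remove all user mappings from a pmap as cheaply as possible:
          * rather than invalidating each address, deactivate the pmap,
          * release all of its ASIDs, and mark it PMAP_DEFERRED_ACTIVATE so
          * that the next pmap_update() reacquires an ASID and reactivates
          * the segtab.
          */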
   1545 bool
   1546 pmap_remove_all(struct pmap *pmap)
   1547 {
   1548 	UVMHIST_FUNC(__func__);
   1549 	UVMHIST_CALLARGS(pmaphist, "(pm=%#jx)", (uintptr_t)pmap, 0, 0, 0);
   1550 
   1551 	KASSERT(pmap != pmap_kernel());
   1552 
   1553 	kpreempt_disable();
   1554 	/*
   1555 	 * Free all of our ASIDs which means we can skip doing all the
   1556 	 * tlb_invalidate_addrs().
   1557 	 */
   1558 	pmap_tlb_miss_lock_enter();
   1559 #ifdef MULTIPROCESSOR
   1560 	// This should be the last CPU with this pmap onproc
   1561 	KASSERT(!kcpuset_isotherset(pmap->pm_onproc, cpu_index(curcpu())));
   1562 	if (kcpuset_isset(pmap->pm_onproc, cpu_index(curcpu())))
   1563 #endif
   1564 		pmap_tlb_asid_deactivate(pmap);
   1565 #ifdef MULTIPROCESSOR
   1566 	KASSERT(kcpuset_iszero(pmap->pm_onproc));
   1567 #endif
   1568 	pmap_tlb_asid_release_all(pmap);
   1569 	pmap_tlb_miss_lock_exit();
   1570 	pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE;
   1571 
   1572 #ifdef PMAP_FAULTINFO
   1573 	curpcb->pcb_faultinfo.pfi_faultaddr = 0;
   1574 	curpcb->pcb_faultinfo.pfi_repeats = 0;
   1575 	curpcb->pcb_faultinfo.pfi_faultpte = NULL;
   1576 #endif
   1577 	kpreempt_enable();
   1578 
   1579 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1580 	return false;
   1581 }
   1582 
   1583 /*
   1584  *	Routine:	pmap_unwire
   1585  *	Function:	Clear the wired attribute for a map/virtual-address
   1586  *			pair.
   1587  *	In/out conditions:
   1588  *			The mapping must already exist in the pmap.
   1589  */
   1590 void
   1591 pmap_unwire(pmap_t pmap, vaddr_t va)
   1592 {
   1593 	UVMHIST_FUNC(__func__);
   1594 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx)", (uintptr_t)pmap, va,
   1595 	    0, 0);
   1596 	PMAP_COUNT(unwire);
   1597 
   1598 	/*
   1599 	 * Don't need to flush the TLB since PG_WIRED is only in software.
   1600 	 */
   1601 	kpreempt_disable();
   1602 	pmap_addr_range_check(pmap, va, va, __func__);
   1603 	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1604 	KASSERTMSG(ptep != NULL, "pmap %p va %#"PRIxVADDR" invalid STE",
   1605 	    pmap, va);
   1606 	pt_entry_t pte = *ptep;
   1607 	KASSERTMSG(pte_valid_p(pte),
   1608 	    "pmap %p va %#"PRIxVADDR" invalid PTE %#"PRIxPTE" @ %p",
   1609 	    pmap, va, pte_value(pte), ptep);
   1610 
   1611 	if (pte_wired_p(pte)) {
   1612 		pmap_tlb_miss_lock_enter();
   1613 		pte_set(ptep, pte_unwire_entry(pte));
   1614 		pmap_tlb_miss_lock_exit();
   1615 		pmap->pm_stats.wired_count--;
   1616 	}
   1617 #ifdef DIAGNOSTIC
   1618 	else {
   1619 		printf("%s: wiring for pmap %p va %#"PRIxVADDR" unchanged!\n",
   1620 		    __func__, pmap, va);
   1621 	}
   1622 #endif
   1623 	kpreempt_enable();
   1624 
   1625 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1626 }
   1627 
   1628 /*
   1629  *	Routine:	pmap_extract
   1630  *	Function:
   1631  *		Extract the physical page address associated
   1632  *		with the given map/virtual_address pair.
   1633  */
   1634 bool
   1635 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
   1636 {
   1637 	paddr_t pa;
   1638 
   1639 	if (pmap == pmap_kernel()) {
   1640 		if (pmap_md_direct_mapped_vaddr_p(va)) {
   1641 			pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   1642 			goto done;
   1643 		}
   1644 		if (pmap_md_io_vaddr_p(va))
    1645 			panic("pmap_extract: io address %#"PRIxVADDR, va);
   1646 
   1647 		if (va >= pmap_limits.virtual_end)
   1648 			panic("%s: illegal kernel mapped address %#"PRIxVADDR,
   1649 			    __func__, va);
   1650 	}
   1651 	kpreempt_disable();
   1652 	const pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1653 	if (ptep == NULL || !pte_valid_p(*ptep)) {
   1654 		kpreempt_enable();
   1655 		return false;
   1656 	}
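         	/* Combine the PTE's frame address with the offset in the page. */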
   1657 	pa = pte_to_paddr(*ptep) | (va & PGOFSET);
   1658 	kpreempt_enable();
   1659 done:
   1660 	if (pap != NULL) {
   1661 		*pap = pa;
   1662 	}
   1663 	return true;
   1664 }
   1665 
   1666 /*
   1667  *	Copy the range specified by src_addr/len
   1668  *	from the source map to the range dst_addr/len
   1669  *	in the destination map.
   1670  *
   1671  *	This routine is only advisory and need not do anything.
   1672  */
   1673 void
   1674 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
   1675     vaddr_t src_addr)
   1676 {
   1677 	UVMHIST_FUNC(__func__);
   1678 	UVMHIST_CALLED(pmaphist);
   1679 	PMAP_COUNT(copy);
   1680 }
   1681 
   1682 /*
   1683  *	pmap_clear_reference:
   1684  *
   1685  *	Clear the reference bit on the specified physical page.
   1686  */
   1687 bool
   1688 pmap_clear_reference(struct vm_page *pg)
   1689 {
   1690 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1691 
   1692 	UVMHIST_FUNC(__func__);
   1693 	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx))",
   1694 	   (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);
   1695 
   1696 	bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);
   1697 
   1698 	UVMHIST_LOG(pmaphist, " <-- wasref %ju", rv, 0, 0, 0);
   1699 
   1700 	return rv;
   1701 }
   1702 
   1703 /*
   1704  *	pmap_is_referenced:
   1705  *
   1706  *	Return whether or not the specified physical page is referenced
   1707  *	by any physical maps.
   1708  */
   1709 bool
   1710 pmap_is_referenced(struct vm_page *pg)
   1711 {
   1712 	return VM_PAGEMD_REFERENCED_P(VM_PAGE_TO_MD(pg));
   1713 }
   1714 
   1715 /*
   1716  *	Clear the modify bits on the specified physical page.
   1717  */
   1718 bool
   1719 pmap_clear_modify(struct vm_page *pg)
   1720 {
   1721 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1722 	pv_entry_t pv = &mdpg->mdpg_first;
   1723 	pv_entry_t pv_next;
   1724 
   1725 	UVMHIST_FUNC(__func__);
   1726 	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (%#jx))",
   1727 	    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);
   1728 	PMAP_COUNT(clear_modify);
   1729 
   1730 	if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
   1731 		if (pv->pv_pmap == NULL) {
   1732 			UVMHIST_LOG(pmapexechist,
   1733 			    "pg %#jx (pa %#jx): execpage cleared",
   1734 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
   1735 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1736 			PMAP_COUNT(exec_uncached_clear_modify);
   1737 		} else {
   1738 			UVMHIST_LOG(pmapexechist,
   1739 			    "pg %#jx (pa %#jx): syncicache performed",
   1740 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
   1741 			pmap_page_syncicache(pg);
   1742 			PMAP_COUNT(exec_synced_clear_modify);
   1743 		}
   1744 	}
   1745 	if (!pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED)) {
   1746 		UVMHIST_LOG(pmaphist, " <-- false", 0, 0, 0, 0);
   1747 		return false;
   1748 	}
   1749 	if (pv->pv_pmap == NULL) {
   1750 		UVMHIST_LOG(pmaphist, " <-- true (no mappings)", 0, 0, 0, 0);
   1751 		return true;
   1752 	}
   1753 
   1754 	/*
    1755 	 * Remove write access from all mappings of this page so we
    1756 	 * can tell if it is written to again later.  Flush the VAC
    1757 	 * first if there is one.
   1758 	 */
   1759 	kpreempt_disable();
   1760 	VM_PAGEMD_PVLIST_READLOCK(mdpg);
   1761 	pmap_pvlist_check(mdpg);
   1762 	for (; pv != NULL; pv = pv_next) {
   1763 		pmap_t pmap = pv->pv_pmap;
   1764 		vaddr_t va = trunc_page(pv->pv_va);
   1765 
   1766 		pv_next = pv->pv_next;
   1767 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1768 		if (PV_ISKENTER_P(pv))
   1769 			continue;
   1770 #endif
   1771 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1772 		KASSERT(ptep);
   1773 		pt_entry_t pte = pte_prot_nowrite(*ptep);
   1774 		if (*ptep == pte) {
   1775 			continue;
   1776 		}
   1777 		KASSERT(pte_valid_p(pte));
   1778 		const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1779 		pmap_tlb_miss_lock_enter();
   1780 		pte_set(ptep, pte);
   1781 		pmap_tlb_invalidate_addr(pmap, va);
   1782 		pmap_tlb_miss_lock_exit();
   1783 		pmap_update(pmap);
   1784 		if (__predict_false(gen != VM_PAGEMD_PVLIST_READLOCK(mdpg))) {
   1785 			/*
   1786 			 * The list changed!  So restart from the beginning.
   1787 			 */
   1788 			pv_next = &mdpg->mdpg_first;
   1789 			pmap_pvlist_check(mdpg);
   1790 		}
   1791 	}
   1792 	pmap_pvlist_check(mdpg);
   1793 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1794 	kpreempt_enable();
   1795 
   1796 	UVMHIST_LOG(pmaphist, " <-- true (mappings changed)", 0, 0, 0, 0);
   1797 	return true;
   1798 }
   1799 
   1800 /*
   1801  *	pmap_is_modified:
   1802  *
   1803  *	Return whether or not the specified physical page is modified
   1804  *	by any physical maps.
   1805  */
   1806 bool
   1807 pmap_is_modified(struct vm_page *pg)
   1808 {
   1809 	return VM_PAGEMD_MODIFIED_P(VM_PAGE_TO_MD(pg));
   1810 }
   1811 
   1812 /*
   1813  *	pmap_set_modified:
   1814  *
    1815  *	Set the modified and referenced attributes for the specified page.
   1816  */
   1817 void
   1818 pmap_set_modified(paddr_t pa)
   1819 {
   1820 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1821 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1822 	pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
   1823 }
   1824 
   1825 /******************** pv_entry management ********************/
   1826 
   1827 static void
   1828 pmap_pvlist_check(struct vm_page_md *mdpg)
   1829 {
   1830 #ifdef DEBUG
   1831 	pv_entry_t pv = &mdpg->mdpg_first;
   1832 	if (pv->pv_pmap != NULL) {
   1833 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1834 		const u_int colormask = uvmexp.colormask;
   1835 		u_int colors = 0;
   1836 #endif
   1837 		for (; pv != NULL; pv = pv->pv_next) {
   1838 			KASSERT(pv->pv_pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(pv->pv_va));
   1839 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1840 			colors |= __BIT(atop(pv->pv_va) & colormask);
   1841 #endif
   1842 		}
   1843 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
    1844 		// Assert that if there is more than one color mapped, the
    1845 		// page is uncached.
   1846 		KASSERTMSG(!pmap_md_virtual_cache_aliasing_p()
   1847 		    || colors == 0 || (colors & (colors-1)) == 0
   1848 		    || VM_PAGEMD_UNCACHED_P(mdpg), "colors=%#x uncached=%u",
   1849 		    colors, VM_PAGEMD_UNCACHED_P(mdpg));
   1850 #endif
   1851 	} else {
    1852 		KASSERT(pv->pv_next == NULL);
   1853 	}
   1854 #endif /* DEBUG */
   1855 }
   1856 
   1857 /*
   1858  * Enter the pmap and virtual address into the
   1859  * physical to virtual map table.
   1860  */
   1861 void
   1862 pmap_enter_pv(pmap_t pmap, vaddr_t va, paddr_t pa, struct vm_page_md *mdpg,
   1863     pt_entry_t *nptep, u_int flags)
   1864 {
   1865 	pv_entry_t pv, npv, apv;
   1866 #ifdef UVMHIST
   1867 	bool first = false;
   1868 	struct vm_page *pg = VM_PAGEMD_VMPAGE_P(mdpg) ? VM_MD_TO_PAGE(mdpg) :
   1869 	    NULL;
   1870 #endif
   1871 
   1872 	UVMHIST_FUNC(__func__);
   1873 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx va=%#jx pg=%#jx (%#jx)",
   1874 	    (uintptr_t)pmap, va, (uintptr_t)pg, pa);
   1875 	UVMHIST_LOG(pmaphist, "nptep=%#jx (%#jx))",
   1876 	    (uintptr_t)nptep, pte_value(*nptep), 0, 0);
   1877 
   1878 	KASSERT(kpreempt_disabled());
   1879 	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
   1880 	KASSERTMSG(pmap != pmap_kernel() || !pmap_md_io_vaddr_p(va),
   1881 	    "va %#"PRIxVADDR, va);
   1882 
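         	/*
         	 * apv caches a pre-allocated pv_entry in case a new entry is
         	 * needed while the pv list is unlocked below.
         	 */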
   1883 	apv = NULL;
   1884 	VM_PAGEMD_PVLIST_LOCK(mdpg);
   1885 again:
   1886 	pv = &mdpg->mdpg_first;
   1887 	pmap_pvlist_check(mdpg);
   1888 	if (pv->pv_pmap == NULL) {
   1889 		KASSERT(pv->pv_next == NULL);
   1890 		/*
   1891 		 * No entries yet, use header as the first entry
   1892 		 */
   1893 		PMAP_COUNT(primary_mappings);
   1894 		PMAP_COUNT(mappings);
   1895 #ifdef UVMHIST
   1896 		first = true;
   1897 #endif
   1898 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1899 		KASSERT(VM_PAGEMD_CACHED_P(mdpg));
    1900 		// If the new mapping has an incompatible color with the last
   1901 		// mapping of this page, clean the page before using it.
   1902 		if (!PMAP_PAGE_COLOROK_P(va, pv->pv_va)) {
   1903 			pmap_md_vca_clean(mdpg, PMAP_WBINV);
   1904 		}
   1905 #endif
   1906 		pv->pv_pmap = pmap;
   1907 		pv->pv_va = va | flags;
   1908 	} else {
   1909 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1910 		if (pmap_md_vca_add(mdpg, va, nptep)) {
   1911 			goto again;
   1912 		}
   1913 #endif
   1914 
   1915 		/*
   1916 		 * There is at least one other VA mapping this page.
   1917 		 * Place this entry after the header.
   1918 		 *
   1919 		 * Note: the entry may already be in the table if
   1920 		 * we are only changing the protection bits.
   1921 		 */
   1922 
   1923 		for (npv = pv; npv; npv = npv->pv_next) {
   1924 			if (pmap == npv->pv_pmap
   1925 			    && va == trunc_page(npv->pv_va)) {
   1926 #ifdef PARANOIADIAG
   1927 				pt_entry_t *ptep = pmap_pte_lookup(pmap, va);
   1928 				pt_entry_t pte = (ptep != NULL) ? *ptep : 0;
   1929 				if (!pte_valid_p(pte) || pte_to_paddr(pte) != pa)
   1930 					printf("%s: found va %#"PRIxVADDR
   1931 					    " pa %#"PRIxPADDR
   1932 					    " in pv_table but != %#"PRIxPTE"\n",
   1933 					    __func__, va, pa, pte_value(pte));
   1934 #endif
   1935 				PMAP_COUNT(remappings);
   1936 				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1937 				if (__predict_false(apv != NULL))
   1938 					pmap_pv_free(apv);
   1939 
   1940 				UVMHIST_LOG(pmaphist,
   1941 				    " <-- done pv=%#jx (reused)",
   1942 				    (uintptr_t)pv, 0, 0, 0);
   1943 				return;
   1944 			}
   1945 		}
   1946 		if (__predict_true(apv == NULL)) {
   1947 			/*
    1948 			 * To allocate a PV we have to release the PVLIST lock,
    1949 			 * so record the page generation first.  We then allocate
    1950 			 * the PV and reacquire the lock.
   1951 			 */
   1952 			pmap_pvlist_check(mdpg);
   1953 			const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1954 
   1955 			apv = (pv_entry_t)pmap_pv_alloc();
   1956 			if (apv == NULL)
   1957 				panic("pmap_enter_pv: pmap_pv_alloc() failed");
   1958 
   1959 			/*
   1960 			 * If the generation has changed, then someone else
   1961 			 * tinkered with this page so we should start over.
   1962 			 */
   1963 			if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg))
   1964 				goto again;
   1965 		}
   1966 		npv = apv;
   1967 		apv = NULL;
   1968 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1969 		/*
    1970 		 * If we need to deal with virtual cache aliases, keep mappings
   1971 		 * in the kernel pmap at the head of the list.  This allows
   1972 		 * the VCA code to easily use them for cache operations if
   1973 		 * present.
   1974 		 */
   1975 		pmap_t kpmap = pmap_kernel();
   1976 		if (pmap != kpmap) {
   1977 			while (pv->pv_pmap == kpmap && pv->pv_next != NULL) {
   1978 				pv = pv->pv_next;
   1979 			}
   1980 		}
   1981 #endif
   1982 		npv->pv_va = va | flags;
   1983 		npv->pv_pmap = pmap;
   1984 		npv->pv_next = pv->pv_next;
   1985 		pv->pv_next = npv;
   1986 		PMAP_COUNT(mappings);
   1987 	}
   1988 	pmap_pvlist_check(mdpg);
   1989 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1990 	if (__predict_false(apv != NULL))
   1991 		pmap_pv_free(apv);
   1992 
   1993 	UVMHIST_LOG(pmaphist, " <-- done pv=%#jx (first %ju)", (uintptr_t)pv,
   1994 	    first, 0, 0);
   1995 }
   1996 
   1997 /*
   1998  * Remove a physical to virtual address translation.
    1999  * If caching was inhibited on this page and there are no more cache
    2000  * conflicts, restore caching.
    2001  * Flush the cache if the last mapping is removed (the page should
    2002  * always be cached at this point).
   2003  */
   2004 void
   2005 pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty)
   2006 {
   2007 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   2008 	pv_entry_t pv, npv;
   2009 	bool last;
   2010 
   2011 	UVMHIST_FUNC(__func__);
   2012 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx, pg=%#jx (pa %#jx)",
   2013 	    (uintptr_t)pmap, va, (uintptr_t)pg, VM_PAGE_TO_PHYS(pg));
   2014 	UVMHIST_LOG(pmaphist, "dirty=%ju)", dirty, 0, 0, 0);
   2015 
   2016 	KASSERT(kpreempt_disabled());
   2017 	KASSERT((va & PAGE_MASK) == 0);
   2018 	pv = &mdpg->mdpg_first;
   2019 
   2020 	VM_PAGEMD_PVLIST_LOCK(mdpg);
   2021 	pmap_pvlist_check(mdpg);
   2022 
   2023 	/*
   2024 	 * If it is the first entry on the list, it is actually
   2025 	 * in the header and we must copy the following entry up
   2026 	 * to the header.  Otherwise we must search the list for
   2027 	 * the entry.  In either case we free the now unused entry.
   2028 	 */
   2029 
   2030 	last = false;
   2031 	if (pmap == pv->pv_pmap && va == trunc_page(pv->pv_va)) {
   2032 		npv = pv->pv_next;
   2033 		if (npv) {
   2034 			*pv = *npv;
   2035 			KASSERT(pv->pv_pmap != NULL);
   2036 		} else {
   2037 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   2038 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
   2039 #endif
   2040 			pv->pv_pmap = NULL;
   2041 			last = true;	/* Last mapping removed */
   2042 		}
   2043 		PMAP_COUNT(remove_pvfirst);
   2044 	} else {
   2045 		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
   2046 			PMAP_COUNT(remove_pvsearch);
   2047 			if (pmap == npv->pv_pmap && va == trunc_page(npv->pv_va))
   2048 				break;
   2049 		}
   2050 		if (npv) {
   2051 			pv->pv_next = npv->pv_next;
   2052 		}
   2053 	}
   2054 
   2055 	pmap_pvlist_check(mdpg);
   2056 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   2057 
   2058 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   2059 	pmap_md_vca_remove(pg, va, dirty, last);
   2060 #endif
   2061 
   2062 	/*
   2063 	 * Free the pv_entry if needed.
   2064 	 */
   2065 	if (npv)
   2066 		pmap_pv_free(npv);
   2067 	if (VM_PAGEMD_EXECPAGE_P(mdpg) && dirty) {
   2068 		if (last) {
   2069 			/*
   2070 			 * If this was the page's last mapping, we no longer
   2071 			 * care about its execness.
   2072 			 */
   2073 			UVMHIST_LOG(pmapexechist,
   2074 			    "pg %#jx (pa %#jx)last %ju: execpage cleared",
   2075 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
   2076 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   2077 			PMAP_COUNT(exec_uncached_remove);
   2078 		} else {
   2079 			/*
   2080 			 * Someone still has it mapped as an executable page
   2081 			 * so we must sync it.
   2082 			 */
   2083 			UVMHIST_LOG(pmapexechist,
   2084 			    "pg %#jx (pa %#jx) last %ju: performed syncicache",
   2085 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
   2086 			pmap_page_syncicache(pg);
   2087 			PMAP_COUNT(exec_synced_remove);
   2088 		}
   2089 	}
   2090 
   2091 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   2092 }
   2093 
   2094 #if defined(MULTIPROCESSOR)
   2095 struct pmap_pvlist_info {
   2096 	kmutex_t *pli_locks[PAGE_SIZE / 32];
   2097 	volatile u_int pli_lock_refs[PAGE_SIZE / 32];
   2098 	volatile u_int pli_lock_index;
   2099 	u_int pli_lock_mask;
   2100 } pmap_pvlist_info;
   2101 
   2102 void
   2103 pmap_pvlist_lock_init(size_t cache_line_size)
   2104 {
   2105 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
   2106 	const vaddr_t lock_page = uvm_pageboot_alloc(PAGE_SIZE);
   2107 	vaddr_t lock_va = lock_page;
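         	/*
         	 * Give each mutex at least a full cache line of its own so
         	 * that adjacent locks never share a line.
         	 */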
   2108 	if (sizeof(kmutex_t) > cache_line_size) {
   2109 		cache_line_size = roundup2(sizeof(kmutex_t), cache_line_size);
   2110 	}
   2111 	const size_t nlocks = PAGE_SIZE / cache_line_size;
   2112 	KASSERT((nlocks & (nlocks - 1)) == 0);
   2113 	/*
   2114 	 * Now divide the page into a number of mutexes, one per cacheline.
   2115 	 */
   2116 	for (size_t i = 0; i < nlocks; lock_va += cache_line_size, i++) {
   2117 		kmutex_t * const lock = (kmutex_t *)lock_va;
   2118 		mutex_init(lock, MUTEX_DEFAULT, IPL_HIGH);
   2119 		pli->pli_locks[i] = lock;
   2120 	}
   2121 	pli->pli_lock_mask = nlocks - 1;
   2122 }
   2123 
   2124 kmutex_t *
   2125 pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
   2126 {
   2127 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
   2128 	kmutex_t *lock = mdpg->mdpg_lock;
   2129 
   2130 	/*
   2131 	 * Allocate a lock on an as-needed basis.  This will hopefully give us
    2132 	 * a semi-random distribution that is not based on page color.
   2133 	 */
   2134 	if (__predict_false(lock == NULL)) {
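         		/*
         		 * Bump the index by an odd stride; since the number of
         		 * locks is a power of two, this cycles through all of
         		 * them over time.
         		 */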
   2135 		size_t locknum = atomic_add_int_nv(&pli->pli_lock_index, 37);
   2136 		size_t lockid = locknum & pli->pli_lock_mask;
   2137 		kmutex_t * const new_lock = pli->pli_locks[lockid];
   2138 		/*
   2139 		 * Set the lock.  If some other thread already did, just use
   2140 		 * the one they assigned.
   2141 		 */
   2142 		lock = atomic_cas_ptr(&mdpg->mdpg_lock, NULL, new_lock);
   2143 		if (lock == NULL) {
   2144 			lock = new_lock;
   2145 			atomic_inc_uint(&pli->pli_lock_refs[lockid]);
   2146 		}
   2147 	}
   2148 
   2149 	/*
   2150 	 * Now finally provide the lock.
   2151 	 */
   2152 	return lock;
   2153 }
   2154 #else /* !MULTIPROCESSOR */
   2155 void
   2156 pmap_pvlist_lock_init(size_t cache_line_size)
   2157 {
   2158 	mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_HIGH);
   2159 }
   2160 
   2161 #ifdef MODULAR
   2162 kmutex_t *
   2163 pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
   2164 {
   2165 	/*
   2166 	 * We just use a global lock.
   2167 	 */
   2168 	if (__predict_false(mdpg->mdpg_lock == NULL)) {
   2169 		mdpg->mdpg_lock = &pmap_pvlist_mutex;
   2170 	}
   2171 
   2172 	/*
   2173 	 * Now finally provide the lock.
   2174 	 */
   2175 	return mdpg->mdpg_lock;
   2176 }
   2177 #endif /* MODULAR */
   2178 #endif /* !MULTIPROCESSOR */
   2179 
   2180 /*
   2181  * pmap_pv_page_alloc:
   2182  *
   2183  *	Allocate a page for the pv_entry pool.
   2184  */
   2185 void *
   2186 pmap_pv_page_alloc(struct pool *pp, int flags)
   2187 {
   2188 	struct vm_page * const pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE);
   2189 	if (pg == NULL)
   2190 		return NULL;
   2191 
   2192 	return (void *)pmap_map_poolpage(VM_PAGE_TO_PHYS(pg));
   2193 }
   2194 
   2195 /*
   2196  * pmap_pv_page_free:
   2197  *
   2198  *	Free a pv_entry pool page.
   2199  */
   2200 void
   2201 pmap_pv_page_free(struct pool *pp, void *v)
   2202 {
   2203 	vaddr_t va = (vaddr_t)v;
   2204 
   2205 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
   2206 	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   2207 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   2208 	KASSERT(pg != NULL);
   2209 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   2210 	kpreempt_disable();
   2211 	pmap_md_vca_remove(pg, va, true, true);
   2212 	kpreempt_enable();
   2213 #endif
   2214 	pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
   2215 	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg)));
   2216 	uvm_pagefree(pg);
   2217 }
   2218 
   2219 #ifdef PMAP_PREFER
   2220 /*
   2221  * Find first virtual address >= *vap that doesn't cause
   2222  * a cache alias conflict.
   2223  */
   2224 void
   2225 pmap_prefer(vaddr_t foff, vaddr_t *vap, vsize_t sz, int td)
   2226 {
   2227 	vsize_t prefer_mask = ptoa(uvmexp.colormask);
   2228 
   2229 	PMAP_COUNT(prefer_requests);
   2230 
   2231 	prefer_mask |= pmap_md_cache_prefer_mask();
   2232 
   2233 	if (prefer_mask) {
   2234 		vaddr_t	va = *vap;
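         		/*
         		 * d is how far va must advance to have the same cache
         		 * color as foff.
         		 */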
   2235 		vsize_t d = (foff - va) & prefer_mask;
   2236 		if (d) {
   2237 			if (td)
   2238 				*vap = trunc_page(va - ((-d) & prefer_mask));
   2239 			else
   2240 				*vap = round_page(va + d);
   2241 			PMAP_COUNT(prefer_adjustments);
   2242 		}
   2243 	}
   2244 }
   2245 #endif /* PMAP_PREFER */
   2246 
   2247 #ifdef PMAP_MAP_POOLPAGE
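         /*
          * pmap_map_poolpage:
          *
          *	Map a physical page for use by a pool and mark it as a
          *	pool page.
          */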
   2248 vaddr_t
   2249 pmap_map_poolpage(paddr_t pa)
   2250 {
   2251 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   2252 	KASSERT(pg);
   2253 
   2254 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   2255 	KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
   2256 
   2257 	pmap_page_set_attributes(mdpg, VM_PAGEMD_POOLPAGE);
   2258 
   2259 	return pmap_md_map_poolpage(pa, NBPG);
   2260 }
   2261 
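         /*
          * pmap_unmap_poolpage:
          *
          *	Undo pmap_map_poolpage(): clear the pool-page attribute and
          *	return the page's physical address.
          */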
   2262 paddr_t
   2263 pmap_unmap_poolpage(vaddr_t va)
   2264 {
   2265 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
   2266 	paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   2267 
   2268 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   2269 	KASSERT(pg != NULL);
   2270 	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg)));
   2271 
   2272 	pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
   2273 	pmap_md_unmap_poolpage(va, NBPG);
   2274 
   2275 	return pa;
   2276 }
   2277 #endif /* PMAP_MAP_POOLPAGE */
   2278