pmap.c revision 1.73
      1 /*	$NetBSD: pmap.c,v 1.73 2022/11/02 08:05:17 skrll Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center and by Chris G. Demetriou.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 /*
     34  * Copyright (c) 1992, 1993
     35  *	The Regents of the University of California.  All rights reserved.
     36  *
     37  * This code is derived from software contributed to Berkeley by
     38  * the Systems Programming Group of the University of Utah Computer
     39  * Science Department and Ralph Campbell.
     40  *
     41  * Redistribution and use in source and binary forms, with or without
     42  * modification, are permitted provided that the following conditions
     43  * are met:
     44  * 1. Redistributions of source code must retain the above copyright
     45  *    notice, this list of conditions and the following disclaimer.
     46  * 2. Redistributions in binary form must reproduce the above copyright
     47  *    notice, this list of conditions and the following disclaimer in the
     48  *    documentation and/or other materials provided with the distribution.
     49  * 3. Neither the name of the University nor the names of its contributors
     50  *    may be used to endorse or promote products derived from this software
     51  *    without specific prior written permission.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     63  * SUCH DAMAGE.
     64  *
     65  *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
     66  */
     67 
     68 #include <sys/cdefs.h>
     69 
     70 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.73 2022/11/02 08:05:17 skrll Exp $");
     71 
     72 /*
     73  *	Manages physical address maps.
     74  *
     75  *	In addition to hardware address maps, this
     76  *	module is called upon to provide software-use-only
     77  *	maps which may or may not be stored in the same
     78  *	form as hardware maps.  These pseudo-maps are
     79  *	used to store intermediate results from copy
     80  *	operations to and from address spaces.
     81  *
     82  *	Since the information managed by this module is
     83  *	also stored by the logical address mapping module,
     84  *	this module may throw away valid virtual-to-physical
     85  *	mappings at almost any time.  However, invalidations
     86  *	of virtual-to-physical mappings must be done as
     87  *	requested.
     88  *
     89  *	In order to cope with hardware architectures which
     90  *	make virtual-to-physical map invalidates expensive,
      91  * this module may delay invalidation or protection-reduction
      92  * operations until such time as they are actually
      93  * necessary.  This module is given full information as
      94  * to which processors are currently using which maps,
      95  * and as to when physical maps must be made correct.
     96  */
     97 
     98 #include "opt_ddb.h"
     99 #include "opt_efi.h"
    100 #include "opt_modular.h"
    101 #include "opt_multiprocessor.h"
    102 #include "opt_sysv.h"
    103 #include "opt_uvmhist.h"
    104 
    105 #define __PMAP_PRIVATE
    106 
    107 #include <sys/param.h>
    108 
    109 #include <sys/asan.h>
    110 #include <sys/atomic.h>
    111 #include <sys/buf.h>
    112 #include <sys/cpu.h>
    113 #include <sys/mutex.h>
    114 #include <sys/pool.h>
    115 
    116 #include <uvm/uvm.h>
    117 #include <uvm/uvm_physseg.h>
    118 #include <uvm/pmap/pmap_pvt.h>
    119 
    120 #if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \
    121     && !defined(PMAP_NO_PV_UNCACHED)
    122 #error PMAP_VIRTUAL_CACHE_ALIASES with MULTIPROCESSOR requires \
    123  PMAP_NO_PV_UNCACHED to be defined
    124 #endif
    125 
    126 #if defined(PMAP_PV_TRACK_ONLY_STUBS)
    127 #undef	__HAVE_PMAP_PV_TRACK
    128 #endif
    129 
    130 PMAP_COUNTER(remove_kernel_calls, "remove kernel calls");
    131 PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped");
    132 PMAP_COUNTER(remove_user_calls, "remove user calls");
    133 PMAP_COUNTER(remove_user_pages, "user pages unmapped");
    134 PMAP_COUNTER(remove_flushes, "remove cache flushes");
    135 PMAP_COUNTER(remove_tlb_ops, "remove tlb ops");
    136 PMAP_COUNTER(remove_pvfirst, "remove pv first");
    137 PMAP_COUNTER(remove_pvsearch, "remove pv search");
    138 
    139 PMAP_COUNTER(prefer_requests, "prefer requests");
    140 PMAP_COUNTER(prefer_adjustments, "prefer adjustments");
    141 
    142 PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
    143 
    144 PMAP_COUNTER(kenter_pa, "kernel fast mapped pages");
    145 PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)");
    146 PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages");
    147 PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages");
    148 
    149 PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable");
    150 PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable");
    151 
    152 PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)");
    153 PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)");
    154 PMAP_COUNTER(kernel_mappings, "kernel pages mapped");
    155 PMAP_COUNTER(user_mappings, "user pages mapped");
    156 PMAP_COUNTER(user_mappings_changed, "user mapping changed");
    157 PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed");
    158 PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
    159 PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
    160 PMAP_COUNTER(pvtracked_mappings, "pv-tracked unmanaged pages mapped");
    161 PMAP_COUNTER(efirt_mappings, "EFI RT pages mapped");
    162 PMAP_COUNTER(managed_mappings, "managed pages mapped");
    163 PMAP_COUNTER(mappings, "pages mapped");
    164 PMAP_COUNTER(remappings, "pages remapped");
    165 PMAP_COUNTER(unmappings, "pages unmapped");
    166 PMAP_COUNTER(primary_mappings, "page initial mappings");
    167 PMAP_COUNTER(primary_unmappings, "page final unmappings");
    168 PMAP_COUNTER(tlb_hit, "page mapping");
    169 
    170 PMAP_COUNTER(exec_mappings, "exec pages mapped");
    171 PMAP_COUNTER(exec_synced_mappings, "exec pages synced");
    172 PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)");
    173 PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)");
    174 PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)");
    175 PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)");
    176 PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)");
    177 PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)");
    178 PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)");
    179 PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)");
    180 PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)");
    181 
    182 PMAP_COUNTER(create, "creates");
    183 PMAP_COUNTER(reference, "references");
    184 PMAP_COUNTER(dereference, "dereferences");
    185 PMAP_COUNTER(destroy, "destroyed");
    186 PMAP_COUNTER(activate, "activations");
    187 PMAP_COUNTER(deactivate, "deactivations");
    188 PMAP_COUNTER(update, "updates");
    189 #ifdef MULTIPROCESSOR
    190 PMAP_COUNTER(shootdown_ipis, "shootdown IPIs");
    191 #endif
    192 PMAP_COUNTER(unwire, "unwires");
    193 PMAP_COUNTER(copy, "copies");
    194 PMAP_COUNTER(clear_modify, "clear_modifies");
    195 PMAP_COUNTER(protect, "protects");
    196 PMAP_COUNTER(page_protect, "page_protects");
    197 
    198 #define PMAP_ASID_RESERVED 0
    199 CTASSERT(PMAP_ASID_RESERVED == 0);
    200 
    201 #ifdef PMAP_HWPAGEWALKER
    202 #ifndef PMAP_PDETAB_ALIGN
    203 #define PMAP_PDETAB_ALIGN	/* nothing */
    204 #endif
    205 
    206 #ifdef _LP64
    207 pmap_pdetab_t	pmap_kstart_pdetab PMAP_PDETAB_ALIGN; /* first mid-level pdetab for kernel */
    208 #endif
    209 pmap_pdetab_t	pmap_kern_pdetab PMAP_PDETAB_ALIGN;
    210 #endif
    211 
    212 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
    213 #ifndef PMAP_SEGTAB_ALIGN
    214 #define PMAP_SEGTAB_ALIGN	/* nothing */
    215 #endif
    216 #ifdef _LP64
    217 pmap_segtab_t	pmap_kstart_segtab PMAP_SEGTAB_ALIGN; /* first mid-level segtab for kernel */
    218 #endif
    219 pmap_segtab_t	pmap_kern_segtab PMAP_SEGTAB_ALIGN = { /* top level segtab for kernel */
    220 #ifdef _LP64
    221 	.seg_seg[(VM_MIN_KERNEL_ADDRESS >> XSEGSHIFT) & (NSEGPG - 1)] = &pmap_kstart_segtab,
    222 #endif
    223 };
    224 #endif
    225 
    226 struct pmap_kernel kernel_pmap_store = {
    227 	.kernel_pmap = {
    228 		.pm_refcnt = 1,
    229 #ifdef PMAP_HWPAGEWALKER
    230 		.pm_pdetab = PMAP_INVALID_PDETAB_ADDRESS,
    231 #endif
    232 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
    233 		.pm_segtab = &pmap_kern_segtab,
    234 #endif
    235 		.pm_minaddr = VM_MIN_KERNEL_ADDRESS,
    236 		.pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
    237 	},
    238 };
    239 
    240 struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;
    241 
    242 #if defined(EFI_RUNTIME)
    243 static struct pmap efirt_pmap;
    244 
    245 pmap_t
    246 pmap_efirt(void)
    247 {
    248 	return &efirt_pmap;
    249 }
    250 #else
    251 static inline pt_entry_t
    252 pte_make_enter_efirt(paddr_t pa, vm_prot_t prot, u_int flags)
    253 {
    254 	panic("not supported");
    255 }
    256 #endif
    257 
    258 /* The current top of kernel VM - gets updated by pmap_growkernel */
    259 vaddr_t pmap_curmaxkvaddr;
    260 
    261 struct pmap_limits pmap_limits = {	/* VA and PA limits */
    262 	.virtual_start = VM_MIN_KERNEL_ADDRESS,
    263 	.virtual_end = VM_MAX_KERNEL_ADDRESS,
    264 };
    265 
    266 #ifdef UVMHIST
    267 static struct kern_history_ent pmapexechistbuf[10000];
    268 static struct kern_history_ent pmaphistbuf[10000];
    269 static struct kern_history_ent pmapxtabhistbuf[5000];
    270 UVMHIST_DEFINE(pmapexechist) = UVMHIST_INITIALIZER(pmapexechist, pmapexechistbuf);
    271 UVMHIST_DEFINE(pmaphist) = UVMHIST_INITIALIZER(pmaphist, pmaphistbuf);
    272 UVMHIST_DEFINE(pmapxtabhist) = UVMHIST_INITIALIZER(pmapxtabhist, pmapxtabhistbuf);
    273 #endif
    274 
    275 /*
    276  * The pools from which pmap structures and sub-structures are allocated.
    277  */
    278 struct pool pmap_pmap_pool;
    279 struct pool pmap_pv_pool;
    280 
    281 #ifndef PMAP_PV_LOWAT
    282 #define	PMAP_PV_LOWAT	16
    283 #endif
    284 int	pmap_pv_lowat = PMAP_PV_LOWAT;
    285 
    286 bool	pmap_initialized = false;
    287 #define	PMAP_PAGE_COLOROK_P(a, b) \
    288 		((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0)
    289 u_int	pmap_page_colormask;
    290 
    291 #define PAGE_IS_MANAGED(pa)	(pmap_initialized && uvm_pageismanaged(pa))
    292 
    293 #define PMAP_IS_ACTIVE(pm)						\
    294 	((pm) == pmap_kernel() ||					\
    295 	 (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)
    296 
    297 /* Forward function declarations */
    298 void pmap_page_remove(struct vm_page_md *);
    299 static void pmap_pvlist_check(struct vm_page_md *);
    300 void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
    301 void pmap_enter_pv(pmap_t, vaddr_t, paddr_t, struct vm_page_md *, pt_entry_t *, u_int);
    302 
    303 /*
    304  * PV table management functions.
    305  */
    306 void	*pmap_pv_page_alloc(struct pool *, int);
    307 void	pmap_pv_page_free(struct pool *, void *);
    308 
    309 struct pool_allocator pmap_pv_page_allocator = {
    310 	pmap_pv_page_alloc, pmap_pv_page_free, 0,
    311 };
    312 
    313 #define	pmap_pv_alloc()		pool_get(&pmap_pv_pool, PR_NOWAIT)
    314 #define	pmap_pv_free(pv)	pool_put(&pmap_pv_pool, (pv))
    315 
    316 #ifndef PMAP_NEED_TLB_MISS_LOCK
    317 
    318 #if defined(PMAP_MD_NEED_TLB_MISS_LOCK) || defined(DEBUG)
    319 #define	PMAP_NEED_TLB_MISS_LOCK
    320 #endif /* PMAP_MD_NEED_TLB_MISS_LOCK || DEBUG */
    321 
    322 #endif /* PMAP_NEED_TLB_MISS_LOCK */
    323 
    324 #ifdef PMAP_NEED_TLB_MISS_LOCK
    325 
    326 #ifdef PMAP_MD_NEED_TLB_MISS_LOCK
    327 #define	pmap_tlb_miss_lock_init()	__nothing /* MD code deals with this */
    328 #define	pmap_tlb_miss_lock_enter()	pmap_md_tlb_miss_lock_enter()
    329 #define	pmap_tlb_miss_lock_exit()	pmap_md_tlb_miss_lock_exit()
    330 #else
    331 kmutex_t pmap_tlb_miss_lock		__cacheline_aligned;
    332 
    333 static void
    334 pmap_tlb_miss_lock_init(void)
    335 {
    336 	mutex_init(&pmap_tlb_miss_lock, MUTEX_SPIN, IPL_HIGH);
    337 }
    338 
    339 static inline void
    340 pmap_tlb_miss_lock_enter(void)
    341 {
    342 	mutex_spin_enter(&pmap_tlb_miss_lock);
    343 }
    344 
    345 static inline void
    346 pmap_tlb_miss_lock_exit(void)
    347 {
    348 	mutex_spin_exit(&pmap_tlb_miss_lock);
    349 }
    350 #endif /* PMAP_MD_NEED_TLB_MISS_LOCK */
    351 
    352 #else
    353 
    354 #define	pmap_tlb_miss_lock_init()	__nothing
    355 #define	pmap_tlb_miss_lock_enter()	__nothing
    356 #define	pmap_tlb_miss_lock_exit()	__nothing
    357 
    358 #endif /* PMAP_NEED_TLB_MISS_LOCK */
    359 
    360 #ifndef MULTIPROCESSOR
    361 kmutex_t pmap_pvlist_mutex	__cacheline_aligned;
    362 #endif
    363 
    364 /*
    365  * Debug functions.
    366  */
    367 
    368 #ifdef DEBUG
    369 static inline void
    370 pmap_asid_check(pmap_t pm, const char *func)
    371 {
    372 	if (!PMAP_IS_ACTIVE(pm))
    373 		return;
    374 
    375 	struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(curcpu()));
    376 	tlb_asid_t asid = tlb_get_asid();
    377 	if (asid != pai->pai_asid)
    378 		panic("%s: inconsistency for active TLB update: %u <-> %u",
    379 		    func, asid, pai->pai_asid);
    380 }
    381 #endif
    382 
    383 static void
    384 pmap_addr_range_check(pmap_t pmap, vaddr_t sva, vaddr_t eva, const char *func)
    385 {
    386 #ifdef DEBUG
    387 	if (pmap == pmap_kernel()) {
    388 		if (sva < VM_MIN_KERNEL_ADDRESS)
    389 			panic("%s: kva %#"PRIxVADDR" not in range",
    390 			    func, sva);
    391 		if (eva >= pmap_limits.virtual_end)
    392 			panic("%s: kva %#"PRIxVADDR" not in range",
    393 			    func, eva);
    394 	} else {
    395 		if (eva > VM_MAXUSER_ADDRESS)
    396 			panic("%s: uva %#"PRIxVADDR" not in range",
    397 			    func, eva);
    398 		pmap_asid_check(pmap, func);
    399 	}
    400 #endif
    401 }
    402 
    403 /*
    404  * Misc. functions.
    405  */
    406 
    407 bool
    408 pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes)
    409 {
    410 	volatile unsigned long * const attrp = &mdpg->mdpg_attrs;
    411 
    412 #ifdef MULTIPROCESSOR
    413 	for (;;) {
    414 		u_int old_attr = *attrp;
    415 		if ((old_attr & clear_attributes) == 0)
    416 			return false;
    417 		u_int new_attr = old_attr & ~clear_attributes;
    418 		if (old_attr == atomic_cas_ulong(attrp, old_attr, new_attr))
    419 			return true;
    420 	}
    421 #else
    422 	unsigned long old_attr = *attrp;
    423 	if ((old_attr & clear_attributes) == 0)
    424 		return false;
    425 	*attrp &= ~clear_attributes;
    426 	return true;
    427 #endif
    428 }
    429 
    430 void
    431 pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes)
    432 {
    433 #ifdef MULTIPROCESSOR
    434 	atomic_or_ulong(&mdpg->mdpg_attrs, set_attributes);
    435 #else
    436 	mdpg->mdpg_attrs |= set_attributes;
    437 #endif
    438 }
    439 
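/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * pmap_page_clear_attributes() is a test-and-clear primitive -- it returns
 * true only if at least one of the requested attribute bits was set and has
 * now been cleared.  A hypothetical clear-modify helper built on it might
 * look like this.
 */
#if 0	/* example only, never compiled */
static bool
example_clear_modify(struct vm_page *pg)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

	/* true iff the page had the modified attribute set */
	return pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED);
}
#endif
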
    440 static void
    441 pmap_page_syncicache(struct vm_page *pg)
    442 {
    443 	UVMHIST_FUNC(__func__);
    444 	UVMHIST_CALLED(pmaphist);
    445 #ifndef MULTIPROCESSOR
    446 	struct pmap * const curpmap = curlwp->l_proc->p_vmspace->vm_map.pmap;
    447 #endif
    448 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
    449 	pv_entry_t pv = &mdpg->mdpg_first;
    450 	kcpuset_t *onproc;
    451 #ifdef MULTIPROCESSOR
    452 	kcpuset_create(&onproc, true);
    453 	KASSERT(onproc != NULL);
    454 #else
    455 	onproc = NULL;
    456 #endif
    457 	VM_PAGEMD_PVLIST_READLOCK(mdpg);
    458 	pmap_pvlist_check(mdpg);
    459 
    460 	UVMHIST_LOG(pmaphist, "pv %#jx pv_pmap %#jx", (uintptr_t)pv,
    461 	    (uintptr_t)pv->pv_pmap, 0, 0);
    462 
    463 	if (pv->pv_pmap != NULL) {
    464 		for (; pv != NULL; pv = pv->pv_next) {
    465 #ifdef MULTIPROCESSOR
    466 			UVMHIST_LOG(pmaphist, "pv %#jx pv_pmap %#jx",
    467 			    (uintptr_t)pv, (uintptr_t)pv->pv_pmap, 0, 0);
    468 			kcpuset_merge(onproc, pv->pv_pmap->pm_onproc);
    469 			if (kcpuset_match(onproc, kcpuset_running)) {
    470 				break;
    471 			}
    472 #else
    473 			if (pv->pv_pmap == curpmap) {
    474 				onproc = curcpu()->ci_data.cpu_kcpuset;
    475 				break;
    476 			}
    477 #endif
    478 		}
    479 	}
    480 	pmap_pvlist_check(mdpg);
    481 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
    482 	kpreempt_disable();
    483 	pmap_md_page_syncicache(mdpg, onproc);
    484 	kpreempt_enable();
    485 #ifdef MULTIPROCESSOR
    486 	kcpuset_destroy(onproc);
    487 #endif
    488 }
    489 
    490 /*
    491  * Define the initial bounds of the kernel virtual address space.
    492  */
    493 void
    494 pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
    495 {
    496 	*vstartp = pmap_limits.virtual_start;
    497 	*vendp = pmap_limits.virtual_end;
    498 }
    499 
    500 vaddr_t
    501 pmap_growkernel(vaddr_t maxkvaddr)
    502 {
    503 	UVMHIST_FUNC(__func__);
    504 	UVMHIST_CALLARGS(pmaphist, "maxkvaddr=%#jx (%#jx)", maxkvaddr,
    505 	    pmap_curmaxkvaddr, 0, 0);
    506 
    507 	vaddr_t virtual_end = pmap_curmaxkvaddr;
    508 	maxkvaddr = pmap_round_seg(maxkvaddr) - 1;
    509 
    510 	/*
    511 	 * Don't exceed VM_MAX_KERNEL_ADDRESS!
    512 	 */
    513 	if (maxkvaddr == 0 || maxkvaddr > VM_MAX_KERNEL_ADDRESS)
    514 		maxkvaddr = VM_MAX_KERNEL_ADDRESS;
    515 
    516 	/*
    517 	 * Reserve PTEs for the new KVA space.
    518 	 */
    519 	for (; virtual_end < maxkvaddr; virtual_end += NBSEG) {
    520 		pmap_pte_reserve(pmap_kernel(), virtual_end, 0);
    521 	}
    522 
    523 	kasan_shadow_map((void *)pmap_curmaxkvaddr,
    524 	    (size_t)(virtual_end - pmap_curmaxkvaddr));
    525 
    526 	/*
    527 	 * Update new end.
    528 	 */
    529 	pmap_curmaxkvaddr = virtual_end;
    530 
    531 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
    532 
    533 	return virtual_end;
    534 }
    535 
    536 /*
    537  * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
    538  * This function allows for early dynamic memory allocation until the virtual
    539  * memory system has been bootstrapped.  After that point, either kmem_alloc
    540  * or malloc should be used.  This function works by stealing pages from the
    541  * (to be) managed page pool, then implicitly mapping the pages (by using
    542  * their direct mapped addresses) and zeroing them.
    543  *
    544  * It may be used once the physical memory segments have been pre-loaded
    545  * into the vm_physmem[] array.  Early memory allocation MUST use this
    546  * interface!  This cannot be used after vm_page_startup(), and will
    547  * generate a panic if tried.
    548  *
    549  * Note that this memory will never be freed, and in essence it is wired
    550  * down.
    551  *
    552  * We must adjust *vstartp and/or *vendp iff we use address space
    553  * from the kernel virtual address range defined by pmap_virtual_space().
    554  */
    555 vaddr_t
    556 pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
    557 {
    558 	size_t npgs;
    559 	paddr_t pa;
    560 	vaddr_t va;
    561 
    562 	uvm_physseg_t maybe_bank = UVM_PHYSSEG_TYPE_INVALID;
    563 
    564 	size = round_page(size);
    565 	npgs = atop(size);
    566 
    567 	aprint_debug("%s: need %zu pages\n", __func__, npgs);
    568 
    569 	for (uvm_physseg_t bank = uvm_physseg_get_first();
    570 	     uvm_physseg_valid_p(bank);
    571 	     bank = uvm_physseg_get_next(bank)) {
    572 
    573 		if (uvm.page_init_done == true)
    574 			panic("pmap_steal_memory: called _after_ bootstrap");
    575 
    576 		aprint_debug("%s: seg %"PRIxPHYSSEG": %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
    577 		    __func__, bank,
    578 		    uvm_physseg_get_avail_start(bank), uvm_physseg_get_start(bank),
    579 		    uvm_physseg_get_avail_end(bank), uvm_physseg_get_end(bank));
    580 
    581 		if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank)
    582 		    || uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) {
    583 			aprint_debug("%s: seg %"PRIxPHYSSEG": bad start\n", __func__, bank);
    584 			continue;
    585 		}
    586 
    587 		if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < npgs) {
    588 			aprint_debug("%s: seg %"PRIxPHYSSEG": too small for %zu pages\n",
    589 			    __func__, bank, npgs);
    590 			continue;
    591 		}
    592 
    593 		if (!pmap_md_ok_to_steal_p(bank, npgs)) {
    594 			continue;
    595 		}
    596 
    597 		/*
    598 		 * Always try to allocate from the segment with the least
    599 		 * amount of space left.
    600 		 */
    601 #define VM_PHYSMEM_SPACE(b)	((uvm_physseg_get_avail_end(b)) - (uvm_physseg_get_avail_start(b)))
    602 		if (uvm_physseg_valid_p(maybe_bank) == false
    603 		    || VM_PHYSMEM_SPACE(bank) < VM_PHYSMEM_SPACE(maybe_bank)) {
    604 			maybe_bank = bank;
    605 		}
    606 	}
    607 
    608 	if (uvm_physseg_valid_p(maybe_bank)) {
    609 		const uvm_physseg_t bank = maybe_bank;
    610 
    611 		/*
    612 		 * There are enough pages here; steal them!
    613 		 */
    614 		pa = ptoa(uvm_physseg_get_start(bank));
    615 		uvm_physseg_unplug(atop(pa), npgs);
    616 
    617 		aprint_debug("%s: seg %"PRIxPHYSSEG": %zu pages stolen (%#"PRIxPADDR" left)\n",
    618 		    __func__, bank, npgs, VM_PHYSMEM_SPACE(bank));
    619 
    620 		va = pmap_md_map_poolpage(pa, size);
    621 		memset((void *)va, 0, size);
    622 		return va;
    623 	}
    624 
    625 	/*
    626 	 * If we got here, there was no memory left.
    627 	 */
    628 	panic("pmap_steal_memory: no memory to steal %zu pages", npgs);
    629 }
    630 
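/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a hypothetical early-boot allocation of one zeroed, wired page.  It must
 * run after the physical segments are loaded but before uvm.page_init_done
 * is set; note that this implementation never touches *vstartp or *vendp.
 */
#if 0	/* example only, never compiled */
	vaddr_t early_va = pmap_steal_memory(PAGE_SIZE, NULL, NULL);
	/* early_va is direct-mapped, zero-filled and never freed */
#endif
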
    631 /*
    632  *	Bootstrap the system enough to run with virtual memory.
    633  *	(Common routine called by machine-dependent bootstrap code.)
    634  */
    635 void
    636 pmap_bootstrap_common(void)
    637 {
    638 	UVMHIST_LINK_STATIC(pmapexechist);
    639 	UVMHIST_LINK_STATIC(pmaphist);
    640 	UVMHIST_LINK_STATIC(pmapxtabhist);
    641 
    642 	static const struct uvm_pagerops pmap_pager = {
    643 		/* nothing */
    644 	};
    645 
    646 	pmap_t pm = pmap_kernel();
    647 
    648 	rw_init(&pm->pm_obj_lock);
    649 	uvm_obj_init(&pm->pm_uobject, &pmap_pager, false, 1);
    650 	uvm_obj_setlock(&pm->pm_uobject, &pm->pm_obj_lock);
    651 
    652 	TAILQ_INIT(&pm->pm_ppg_list);
    653 
    654 #if defined(PMAP_HWPAGEWALKER)
    655 	TAILQ_INIT(&pm->pm_pdetab_list);
    656 #endif
    657 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
    658 	TAILQ_INIT(&pm->pm_segtab_list);
    659 #endif
    660 
    661 	pmap_tlb_miss_lock_init();
    662 }
    663 
    664 /*
    665  *	Initialize the pmap module.
    666  *	Called by vm_init, to initialize any structures that the pmap
    667  *	system needs to map virtual memory.
    668  */
    669 void
    670 pmap_init(void)
    671 {
    672 	UVMHIST_FUNC(__func__);
    673 	UVMHIST_CALLED(pmaphist);
    674 
    675 	/*
    676 	 * Initialize the segtab lock.
    677 	 */
    678 	mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH);
    679 
    680 	/*
    681 	 * Set a low water mark on the pv_entry pool, so that we are
    682 	 * more likely to have these around even in extreme memory
    683 	 * starvation.
    684 	 */
    685 	pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);
    686 
    687 	/*
    688 	 * Set the page colormask but allow pmap_md_init to override it.
    689 	 */
    690 	pmap_page_colormask = ptoa(uvmexp.colormask);
    691 
    692 	pmap_md_init();
    693 
    694 	/*
    695 	 * Now it is safe to enable pv entry recording.
    696 	 */
    697 	pmap_initialized = true;
    698 }
    699 
    700 /*
    701  *	Create and return a physical map.
    702  *
    703  *	If the size specified for the map
    704  *	is zero, the map is an actual physical
    705  *	map, and may be referenced by the
    706  *	hardware.
    707  *
    708  *	If the size specified is non-zero,
    709  *	the map will be used in software only, and
    710  *	is bounded by that size.
    711  */
    712 pmap_t
    713 pmap_create(void)
    714 {
    715 	UVMHIST_FUNC(__func__);
    716 	UVMHIST_CALLED(pmaphist);
    717 	PMAP_COUNT(create);
    718 
    719 	static const struct uvm_pagerops pmap_pager = {
    720 		/* nothing */
    721 	};
    722 
    723 	pmap_t pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
    724 	memset(pmap, 0, PMAP_SIZE);
    725 
    726 	KASSERT(pmap->pm_pai[0].pai_link.le_prev == NULL);
    727 
    728 	pmap->pm_refcnt = 1;
    729 	pmap->pm_minaddr = VM_MIN_ADDRESS;
    730 	pmap->pm_maxaddr = VM_MAXUSER_ADDRESS;
    731 
    732 	rw_init(&pmap->pm_obj_lock);
    733 	uvm_obj_init(&pmap->pm_uobject, &pmap_pager, false, 1);
    734 	uvm_obj_setlock(&pmap->pm_uobject, &pmap->pm_obj_lock);
    735 
    736 	TAILQ_INIT(&pmap->pm_ppg_list);
    737 #if defined(PMAP_HWPAGEWALKER)
    738 	TAILQ_INIT(&pmap->pm_pdetab_list);
    739 #endif
    740 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
    741 	TAILQ_INIT(&pmap->pm_segtab_list);
    742 #endif
    743 
    744 	pmap_segtab_init(pmap);
    745 
    746 #ifdef MULTIPROCESSOR
    747 	kcpuset_create(&pmap->pm_active, true);
    748 	kcpuset_create(&pmap->pm_onproc, true);
    749 	KASSERT(pmap->pm_active != NULL);
    750 	KASSERT(pmap->pm_onproc != NULL);
    751 #endif
    752 
    753 	UVMHIST_LOG(pmaphist, " <-- done (pmap=%#jx)", (uintptr_t)pmap,
    754 	    0, 0, 0);
    755 
    756 	return pmap;
    757 }
    758 
    759 /*
    760  *	Retire the given physical map from service.
    761  *	Should only be called if the map contains
    762  *	no valid mappings.
    763  */
    764 void
    765 pmap_destroy(pmap_t pmap)
    766 {
    767 	UVMHIST_FUNC(__func__);
    768 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
    769 	UVMHIST_CALLARGS(pmapxtabhist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
    770 
    771 	membar_release();
    772 	if (atomic_dec_uint_nv(&pmap->pm_refcnt) > 0) {
    773 		PMAP_COUNT(dereference);
    774 		UVMHIST_LOG(pmaphist, " <-- done (deref)", 0, 0, 0, 0);
    775 		UVMHIST_LOG(pmapxtabhist, " <-- done (deref)", 0, 0, 0, 0);
    776 		return;
    777 	}
    778 	membar_acquire();
    779 
    780 	PMAP_COUNT(destroy);
    781 	KASSERT(pmap->pm_refcnt == 0);
    782 	kpreempt_disable();
    783 	pmap_tlb_miss_lock_enter();
    784 	pmap_tlb_asid_release_all(pmap);
    785 	pmap_tlb_miss_lock_exit();
    786 	pmap_segtab_destroy(pmap, NULL, 0);
    787 
    788 	KASSERT(TAILQ_EMPTY(&pmap->pm_ppg_list));
    789 
    790 #ifdef _LP64
    791 #if defined(PMAP_HWPAGEWALKER)
    792 	KASSERT(TAILQ_EMPTY(&pmap->pm_pdetab_list));
    793 #endif
    794 #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
    795 	KASSERT(TAILQ_EMPTY(&pmap->pm_segtab_list));
    796 #endif
    797 #endif
    798 	KASSERT(pmap->pm_uobject.uo_npages == 0);
    799 
    800 	uvm_obj_destroy(&pmap->pm_uobject, false);
    801 	rw_destroy(&pmap->pm_obj_lock);
    802 
    803 #ifdef MULTIPROCESSOR
    804 	kcpuset_destroy(pmap->pm_active);
    805 	kcpuset_destroy(pmap->pm_onproc);
    806 	pmap->pm_active = NULL;
    807 	pmap->pm_onproc = NULL;
    808 #endif
    809 
    810 	pool_put(&pmap_pmap_pool, pmap);
    811 	kpreempt_enable();
    812 
    813 	UVMHIST_LOG(pmaphist, " <-- done (freed)", 0, 0, 0, 0);
    814 	UVMHIST_LOG(pmapxtabhist, " <-- done (freed)", 0, 0, 0, 0);
    815 }
    816 
    817 /*
    818  *	Add a reference to the specified pmap.
    819  */
    820 void
    821 pmap_reference(pmap_t pmap)
    822 {
    823 	UVMHIST_FUNC(__func__);
    824 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
    825 	PMAP_COUNT(reference);
    826 
    827 	if (pmap != NULL) {
    828 		atomic_inc_uint(&pmap->pm_refcnt);
    829 	}
    830 
    831 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
    832 }
    833 
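/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the reference-counted lifecycle implemented by pmap_create(),
 * pmap_reference() and pmap_destroy().
 */
#if 0	/* example only, never compiled */
	pmap_t pm = pmap_create();	/* pm_refcnt == 1 */

	pmap_reference(pm);		/* pm_refcnt == 2 */
	pmap_destroy(pm);		/* pm_refcnt == 1, pmap survives */
	pmap_destroy(pm);		/* pm_refcnt == 0, resources released */
#endif
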
    834 /*
    835  *	Make a new pmap (vmspace) active for the given process.
    836  */
    837 void
    838 pmap_activate(struct lwp *l)
    839 {
    840 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
    841 
    842 	UVMHIST_FUNC(__func__);
    843 	UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
    844 	    (uintptr_t)pmap, 0, 0);
    845 	PMAP_COUNT(activate);
    846 
    847 	kpreempt_disable();
    848 	pmap_tlb_miss_lock_enter();
    849 	pmap_tlb_asid_acquire(pmap, l);
    850 	pmap_segtab_activate(pmap, l);
    851 	pmap_tlb_miss_lock_exit();
    852 	kpreempt_enable();
    853 
    854 	UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
    855 	    l->l_lid, 0, 0);
    856 }
    857 
    858 /*
    859  * Remove this page from all physical maps in which it resides.
    860  * Reflects back modify bits to the pager.
    861  */
    862 void
    863 pmap_page_remove(struct vm_page_md *mdpg)
    864 {
    865 	kpreempt_disable();
    866 	VM_PAGEMD_PVLIST_LOCK(mdpg);
    867 	pmap_pvlist_check(mdpg);
    868 
    869 	struct vm_page * const pg =
    870 	    VM_PAGEMD_VMPAGE_P(mdpg) ? VM_MD_TO_PAGE(mdpg) : NULL;
    871 
    872 	UVMHIST_FUNC(__func__);
    873 	if (pg) {
    874 		UVMHIST_CALLARGS(pmaphist, "mdpg %#jx pg %#jx (pa %#jx): "
    875 		    "execpage cleared", (uintptr_t)mdpg, (uintptr_t)pg,
    876 		    VM_PAGE_TO_PHYS(pg), 0);
    877 	} else {
    878 		UVMHIST_CALLARGS(pmaphist, "mdpg %#jx", (uintptr_t)mdpg, 0,
    879 		    0, 0);
    880 	}
    881 
    882 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
    883 	pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE | VM_PAGEMD_UNCACHED);
    884 #else
    885 	pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
    886 #endif
    887 	PMAP_COUNT(exec_uncached_remove);
    888 
    889 	pv_entry_t pv = &mdpg->mdpg_first;
    890 	if (pv->pv_pmap == NULL) {
    891 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
    892 		kpreempt_enable();
    893 		UVMHIST_LOG(pmaphist, " <-- done (empty)", 0, 0, 0, 0);
    894 		return;
    895 	}
    896 
    897 	pv_entry_t npv;
    898 	pv_entry_t pvp = NULL;
    899 
    900 	for (; pv != NULL; pv = npv) {
    901 		npv = pv->pv_next;
    902 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
    903 		if (PV_ISKENTER_P(pv)) {
    904 			UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %#jx"
    905 			    " skip", (uintptr_t)pv, (uintptr_t)pv->pv_pmap,
    906 			    pv->pv_va, 0);
    907 
    908 			KASSERT(pv->pv_pmap == pmap_kernel());
    909 
    910 			/* Assume no more - it'll get fixed if there are */
    911 			pv->pv_next = NULL;
    912 
    913 			/*
    914 			 * pvp is non-null when we already have a PV_KENTER
    915 			 * pv in pvh_first; otherwise we haven't seen a
    916 			 * PV_KENTER pv and we need to copy this one to
    917 			 * pvh_first
    918 			 */
    919 			if (pvp) {
    920 				/*
    921 				 * The previous PV_KENTER pv needs to point to
    922 				 * this PV_KENTER pv
    923 				 */
    924 				pvp->pv_next = pv;
    925 			} else {
    926 				pv_entry_t fpv = &mdpg->mdpg_first;
    927 				*fpv = *pv;
    928 				KASSERT(fpv->pv_pmap == pmap_kernel());
    929 			}
    930 			pvp = pv;
    931 			continue;
    932 		}
    933 #endif
    934 		const pmap_t pmap = pv->pv_pmap;
    935 		vaddr_t va = trunc_page(pv->pv_va);
    936 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
    937 		KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
    938 		    pmap_limits.virtual_end);
    939 		pt_entry_t pte = *ptep;
    940 		UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %#jx"
    941 		    " pte %#jx", (uintptr_t)pv, (uintptr_t)pmap, va,
    942 		    pte_value(pte));
    943 		if (!pte_valid_p(pte))
    944 			continue;
    945 		const bool is_kernel_pmap_p = (pmap == pmap_kernel());
    946 		if (is_kernel_pmap_p) {
    947 			PMAP_COUNT(remove_kernel_pages);
    948 		} else {
    949 			PMAP_COUNT(remove_user_pages);
    950 		}
    951 		if (pte_wired_p(pte))
    952 			pmap->pm_stats.wired_count--;
    953 		pmap->pm_stats.resident_count--;
    954 
    955 		pmap_tlb_miss_lock_enter();
    956 		const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
    957 		pte_set(ptep, npte);
    958 		if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
    959 			/*
    960 			 * Flush the TLB for the given address.
    961 			 */
    962 			pmap_tlb_invalidate_addr(pmap, va);
    963 		}
    964 		pmap_tlb_miss_lock_exit();
    965 
    966 		/*
     967 		 * pvp is non-null, meaning this pv is not pvh_first, so we
     968 		 * should free it.
    969 		 */
    970 		if (pvp) {
    971 			KASSERT(pvp->pv_pmap == pmap_kernel());
    972 			KASSERT(pvp->pv_next == NULL);
    973 			pmap_pv_free(pv);
    974 		} else {
    975 			pv->pv_pmap = NULL;
    976 			pv->pv_next = NULL;
    977 		}
    978 	}
    979 
    980 	pmap_pvlist_check(mdpg);
    981 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
    982 	kpreempt_enable();
    983 
    984 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
    985 }
    986 
    987 #ifdef __HAVE_PMAP_PV_TRACK
    988 /*
    989  * pmap_pv_protect: change protection of an unmanaged pv-tracked page from
    990  * all pmaps that map it
    991  */
    992 void
    993 pmap_pv_protect(paddr_t pa, vm_prot_t prot)
    994 {
    995 
    996 	/* the only case is remove at the moment */
    997 	KASSERT(prot == VM_PROT_NONE);
    998 	struct pmap_page *pp;
    999 
   1000 	pp = pmap_pv_tracked(pa);
   1001 	if (pp == NULL)
   1002 		panic("pmap_pv_protect: page not pv-tracked: 0x%"PRIxPADDR,
   1003 		    pa);
   1004 
   1005 	struct vm_page_md *mdpg = PMAP_PAGE_TO_MD(pp);
   1006 	pmap_page_remove(mdpg);
   1007 }
   1008 #endif
   1009 
   1010 /*
   1011  *	Make a previously active pmap (vmspace) inactive.
   1012  */
   1013 void
   1014 pmap_deactivate(struct lwp *l)
   1015 {
   1016 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
   1017 
   1018 	UVMHIST_FUNC(__func__);
   1019 	UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
   1020 	    (uintptr_t)pmap, 0, 0);
   1021 	PMAP_COUNT(deactivate);
   1022 
   1023 	kpreempt_disable();
   1024 	KASSERT(l == curlwp || l->l_cpu == curlwp->l_cpu);
   1025 	pmap_tlb_miss_lock_enter();
   1026 	pmap_tlb_asid_deactivate(pmap);
   1027 	pmap_segtab_deactivate(pmap);
   1028 	pmap_tlb_miss_lock_exit();
   1029 	kpreempt_enable();
   1030 
   1031 	UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
   1032 	    l->l_lid, 0, 0);
   1033 }
   1034 
   1035 void
   1036 pmap_update(struct pmap *pmap)
   1037 {
   1038 	UVMHIST_FUNC(__func__);
   1039 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
   1040 	PMAP_COUNT(update);
   1041 
   1042 	kpreempt_disable();
   1043 #if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
   1044 	u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0);
   1045 	if (pending && pmap_tlb_shootdown_bystanders(pmap))
   1046 		PMAP_COUNT(shootdown_ipis);
   1047 #endif
   1048 	pmap_tlb_miss_lock_enter();
   1049 #if defined(DEBUG) && !defined(MULTIPROCESSOR)
   1050 	pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
   1051 #endif /* DEBUG */
   1052 
   1053 	/*
   1054 	 * If pmap_remove_all was called, we deactivated ourselves and nuked
   1055 	 * our ASID.  Now we have to reactivate ourselves.
   1056 	 */
   1057 	if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) {
   1058 		pmap->pm_flags ^= PMAP_DEFERRED_ACTIVATE;
   1059 		pmap_tlb_asid_acquire(pmap, curlwp);
   1060 		pmap_segtab_activate(pmap, curlwp);
   1061 	}
   1062 	pmap_tlb_miss_lock_exit();
   1063 	kpreempt_enable();
   1064 
   1065 	UVMHIST_LOG(pmaphist, " <-- done (kernel=%jd)",
   1066 		    (pmap == pmap_kernel() ? 1 : 0), 0, 0, 0);
   1067 }
   1068 
   1069 /*
   1070  *	Remove the given range of addresses from the specified map.
   1071  *
   1072  *	It is assumed that the start and end are properly
   1073  *	rounded to the page size.
   1074  */
   1075 
   1076 static bool
   1077 pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
   1078     uintptr_t flags)
   1079 {
   1080 	const pt_entry_t npte = flags;
   1081 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
   1082 
   1083 	UVMHIST_FUNC(__func__);
   1084 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jd va=%#jx..%#jx)",
   1085 	    (uintptr_t)pmap, (is_kernel_pmap_p ? 1 : 0), sva, eva);
   1086 	UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)",
   1087 	    (uintptr_t)ptep, flags, 0, 0);
   1088 
   1089 	KASSERT(kpreempt_disabled());
   1090 
   1091 	for (; sva < eva; sva += NBPG, ptep++) {
   1092 		const pt_entry_t pte = *ptep;
   1093 		if (!pte_valid_p(pte))
   1094 			continue;
   1095 		if (is_kernel_pmap_p) {
   1096 			PMAP_COUNT(remove_kernel_pages);
   1097 		} else {
   1098 			PMAP_COUNT(remove_user_pages);
   1099 		}
   1100 		if (pte_wired_p(pte))
   1101 			pmap->pm_stats.wired_count--;
   1102 		pmap->pm_stats.resident_count--;
   1103 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
   1104 		if (__predict_true(pg != NULL)) {
   1105 			pmap_remove_pv(pmap, sva, pg, pte_modified_p(pte));
   1106 		}
   1107 		pmap_tlb_miss_lock_enter();
   1108 		pte_set(ptep, npte);
   1109 		if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
   1110 			/*
   1111 			 * Flush the TLB for the given address.
   1112 			 */
   1113 			pmap_tlb_invalidate_addr(pmap, sva);
   1114 		}
   1115 		pmap_tlb_miss_lock_exit();
   1116 	}
   1117 
   1118 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1119 
   1120 	return false;
   1121 }
   1122 
   1123 void
   1124 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
   1125 {
   1126 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
   1127 	const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
   1128 
   1129 	UVMHIST_FUNC(__func__);
   1130 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx)",
   1131 	    (uintptr_t)pmap, sva, eva, 0);
   1132 
   1133 	if (is_kernel_pmap_p) {
   1134 		PMAP_COUNT(remove_kernel_calls);
   1135 	} else {
   1136 		PMAP_COUNT(remove_user_calls);
   1137 	}
   1138 #ifdef PMAP_FAULTINFO
   1139 	curpcb->pcb_faultinfo.pfi_faultaddr = 0;
   1140 	curpcb->pcb_faultinfo.pfi_repeats = 0;
   1141 	curpcb->pcb_faultinfo.pfi_faultptep = NULL;
   1142 #endif
   1143 	kpreempt_disable();
   1144 	pmap_addr_range_check(pmap, sva, eva, __func__);
   1145 	pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte);
   1146 	kpreempt_enable();
   1147 
   1148 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1149 }
   1150 
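/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * unmapping a page-aligned range of a hypothetical pmap "pm" and then
 * publishing the change with pmap_update().
 */
#if 0	/* example only, never compiled */
	pmap_remove(pm, sva, eva);
	pmap_update(pm);
#endif
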
   1151 /*
   1152  *	pmap_page_protect:
   1153  *
   1154  *	Lower the permission for all mappings to a given page.
   1155  */
   1156 void
   1157 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   1158 {
   1159 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1160 	pv_entry_t pv;
   1161 	vaddr_t va;
   1162 
   1163 	UVMHIST_FUNC(__func__);
   1164 	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx) prot=%#jx)",
   1165 	    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), prot, 0);
   1166 	PMAP_COUNT(page_protect);
   1167 
   1168 	switch (prot) {
   1169 	case VM_PROT_READ | VM_PROT_WRITE:
   1170 	case VM_PROT_ALL:
   1171 		break;
   1172 
   1173 	/* copy_on_write */
   1174 	case VM_PROT_READ:
   1175 	case VM_PROT_READ | VM_PROT_EXECUTE:
   1176 		pv = &mdpg->mdpg_first;
   1177 		kpreempt_disable();
   1178 		VM_PAGEMD_PVLIST_READLOCK(mdpg);
   1179 		pmap_pvlist_check(mdpg);
   1180 		/*
   1181 		 * Loop over all current mappings setting/clearing as
   1182 		 * appropriate.
   1183 		 */
   1184 		if (pv->pv_pmap != NULL) {
   1185 			while (pv != NULL) {
   1186 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1187 				if (PV_ISKENTER_P(pv)) {
   1188 					pv = pv->pv_next;
   1189 					continue;
   1190 				}
   1191 #endif
   1192 				const pmap_t pmap = pv->pv_pmap;
   1193 				va = trunc_page(pv->pv_va);
   1194 				const uintptr_t gen =
   1195 				    VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1196 				pmap_protect(pmap, va, va + PAGE_SIZE, prot);
   1197 				KASSERT(pv->pv_pmap == pmap);
   1198 				pmap_update(pmap);
   1199 				if (gen != VM_PAGEMD_PVLIST_READLOCK(mdpg)) {
   1200 					pv = &mdpg->mdpg_first;
   1201 				} else {
   1202 					pv = pv->pv_next;
   1203 				}
   1204 				pmap_pvlist_check(mdpg);
   1205 			}
   1206 		}
   1207 		pmap_pvlist_check(mdpg);
   1208 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1209 		kpreempt_enable();
   1210 		break;
   1211 
   1212 	/* remove_all */
   1213 	default:
   1214 		pmap_page_remove(mdpg);
   1215 	}
   1216 
   1217 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1218 }
   1219 
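/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the two interesting uses of pmap_page_protect() -- write-protecting every
 * mapping of a page (copy-on-write) and revoking all of its mappings.
 */
#if 0	/* example only, never compiled */
	pmap_page_protect(pg, VM_PROT_READ);	/* downgrade to read-only */
	pmap_page_protect(pg, VM_PROT_NONE);	/* remove all mappings */
#endif
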
   1220 static bool
   1221 pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
   1222 	uintptr_t flags)
   1223 {
   1224 	const vm_prot_t prot = (flags & VM_PROT_ALL);
   1225 
   1226 	UVMHIST_FUNC(__func__);
   1227 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jd va=%#jx..%#jx)",
   1228 	    (uintptr_t)pmap, (pmap == pmap_kernel() ? 1 : 0), sva, eva);
   1229 	UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)",
   1230 	    (uintptr_t)ptep, flags, 0, 0);
   1231 
   1232 	KASSERT(kpreempt_disabled());
   1233 	/*
   1234 	 * Change protection on every valid mapping within this segment.
   1235 	 */
   1236 	for (; sva < eva; sva += NBPG, ptep++) {
   1237 		pt_entry_t pte = *ptep;
   1238 		if (!pte_valid_p(pte))
   1239 			continue;
   1240 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
   1241 		if (pg != NULL && pte_modified_p(pte)) {
   1242 			struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1243 			if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
   1244 				KASSERT(!VM_PAGEMD_PVLIST_EMPTY_P(mdpg));
   1245 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1246 				if (VM_PAGEMD_CACHED_P(mdpg)) {
   1247 #endif
   1248 					UVMHIST_LOG(pmapexechist,
   1249 					    "pg %#jx (pa %#jx): "
   1250 					    "syncicached performed",
   1251 					    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg),
   1252 					    0, 0);
   1253 					pmap_page_syncicache(pg);
   1254 					PMAP_COUNT(exec_synced_protect);
   1255 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1256 				}
   1257 #endif
   1258 			}
   1259 		}
   1260 		pte = pte_prot_downgrade(pte, prot);
   1261 		if (*ptep != pte) {
   1262 			pmap_tlb_miss_lock_enter();
   1263 			pte_set(ptep, pte);
   1264 			/*
   1265 			 * Update the TLB if needed.
   1266 			 */
   1267 			pmap_tlb_update_addr(pmap, sva, pte, PMAP_TLB_NEED_IPI);
   1268 			pmap_tlb_miss_lock_exit();
   1269 		}
   1270 	}
   1271 
   1272 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1273 
   1274 	return false;
   1275 }
   1276 
   1277 /*
   1278  *	Set the physical protection on the
   1279  *	specified range of this map as requested.
   1280  */
   1281 void
   1282 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
   1283 {
   1284 	UVMHIST_FUNC(__func__);
   1285 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx, prot=%ju)",
   1286 	    (uintptr_t)pmap, sva, eva, prot);
   1287 	PMAP_COUNT(protect);
   1288 
   1289 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
   1290 		pmap_remove(pmap, sva, eva);
   1291 		UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1292 		return;
   1293 	}
   1294 
   1295 	/*
   1296 	 * Change protection on every valid mapping within this segment.
   1297 	 */
   1298 	kpreempt_disable();
   1299 	pmap_addr_range_check(pmap, sva, eva, __func__);
   1300 	pmap_pte_process(pmap, sva, eva, pmap_pte_protect, prot);
   1301 	kpreempt_enable();
   1302 
   1303 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1304 }
   1305 
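/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * write-protecting a page-aligned range of a hypothetical pmap "pm", then
 * flushing any deferred TLB work with pmap_update().
 */
#if 0	/* example only, never compiled */
	pmap_protect(pm, sva, eva, VM_PROT_READ);
	pmap_update(pm);
#endif
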
   1306 #if defined(PMAP_VIRTUAL_CACHE_ALIASES) && !defined(PMAP_NO_PV_UNCACHED)
   1307 /*
   1308  *	pmap_page_cache:
   1309  *
   1310  *	Change all mappings of a managed page to cached/uncached.
   1311  */
   1312 void
   1313 pmap_page_cache(struct vm_page_md *mdpg, bool cached)
   1314 {
   1315 #ifdef UVMHIST
   1316 	const bool vmpage_p = VM_PAGEMD_VMPAGE_P(mdpg);
   1317 	struct vm_page * const pg = vmpage_p ? VM_MD_TO_PAGE(mdpg) : NULL;
   1318 #endif
   1319 
   1320 	UVMHIST_FUNC(__func__);
   1321 	UVMHIST_CALLARGS(pmaphist, "(mdpg=%#jx (pa %#jx) cached=%jd vmpage %jd)",
   1322 	    (uintptr_t)mdpg, pg ? VM_PAGE_TO_PHYS(pg) : 0, cached, vmpage_p);
   1323 
   1324 	KASSERT(kpreempt_disabled());
   1325 	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
   1326 
   1327 	if (cached) {
   1328 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
   1329 		PMAP_COUNT(page_cache_restorations);
   1330 	} else {
   1331 		pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED);
   1332 		PMAP_COUNT(page_cache_evictions);
   1333 	}
   1334 
   1335 	for (pv_entry_t pv = &mdpg->mdpg_first; pv != NULL; pv = pv->pv_next) {
   1336 		pmap_t pmap = pv->pv_pmap;
   1337 		vaddr_t va = trunc_page(pv->pv_va);
   1338 
   1339 		KASSERT(pmap != NULL);
   1340 		KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
   1341 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1342 		if (ptep == NULL)
   1343 			continue;
   1344 		pt_entry_t pte = *ptep;
   1345 		if (pte_valid_p(pte)) {
   1346 			pte = pte_cached_change(pte, cached);
   1347 			pmap_tlb_miss_lock_enter();
   1348 			pte_set(ptep, pte);
   1349 			pmap_tlb_update_addr(pmap, va, pte, PMAP_TLB_NEED_IPI);
   1350 			pmap_tlb_miss_lock_exit();
   1351 		}
   1352 	}
   1353 
   1354 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1355 }
   1356 #endif	/* PMAP_VIRTUAL_CACHE_ALIASES && !PMAP_NO_PV_UNCACHED */
   1357 
   1358 /*
   1359  *	Insert the given physical page (p) at
   1360  *	the specified virtual address (v) in the
   1361  *	target physical map with the protection requested.
   1362  *
   1363  *	If specified, the page will be wired down, meaning
   1364  *	that the related pte can not be reclaimed.
   1365  *
   1366  *	NB:  This is the only routine which MAY NOT lazy-evaluate
   1367  *	or lose information.  That is, this routine must actually
   1368  *	insert this page into the given map NOW.
   1369  */
   1370 int
   1371 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1372 {
   1373 	const bool wired = (flags & PMAP_WIRED) != 0;
   1374 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
   1375 #if defined(EFI_RUNTIME)
   1376 	const bool is_efirt_pmap_p = (pmap == pmap_efirt());
   1377 #else
   1378 	const bool is_efirt_pmap_p = false;
   1379 #endif
   1380 	u_int update_flags = (flags & VM_PROT_ALL) != 0 ? PMAP_TLB_INSERT : 0;
   1381 #ifdef UVMHIST
   1382 	struct kern_history * const histp =
   1383 	    ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist);
   1384 #endif
   1385 
   1386 	UVMHIST_FUNC(__func__);
   1387 	UVMHIST_CALLARGS(*histp, "(pmap=%#jx, va=%#jx, pa=%#jx",
   1388 	    (uintptr_t)pmap, va, pa, 0);
   1389 	UVMHIST_LOG(*histp, "prot=%#jx flags=%#jx)", prot, flags, 0, 0);
   1390 
   1391 	const bool good_color = PMAP_PAGE_COLOROK_P(pa, va);
   1392 	if (is_kernel_pmap_p) {
   1393 		PMAP_COUNT(kernel_mappings);
   1394 		if (!good_color)
   1395 			PMAP_COUNT(kernel_mappings_bad);
   1396 	} else {
   1397 		PMAP_COUNT(user_mappings);
   1398 		if (!good_color)
   1399 			PMAP_COUNT(user_mappings_bad);
   1400 	}
   1401 	pmap_addr_range_check(pmap, va, va, __func__);
   1402 
   1403 	KASSERTMSG(prot & VM_PROT_READ, "no READ (%#x) in prot %#x",
   1404 	    VM_PROT_READ, prot);
   1405 
   1406 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1407 	struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
   1408 
   1409 	struct vm_page_md *mdpp = NULL;
   1410 #ifdef __HAVE_PMAP_PV_TRACK
   1411 	struct pmap_page *pp = pmap_pv_tracked(pa);
   1412 	mdpp = pp ? PMAP_PAGE_TO_MD(pp) : NULL;
   1413 #endif
   1414 
   1415 	if (mdpg) {
   1416 		/* Set page referenced/modified status based on flags */
   1417 		if (flags & VM_PROT_WRITE) {
   1418 			pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED | VM_PAGEMD_REFERENCED);
   1419 		} else if (flags & VM_PROT_ALL) {
   1420 			pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
   1421 		}
   1422 
   1423 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1424 		if (!VM_PAGEMD_CACHED_P(mdpg)) {
   1425 			flags |= PMAP_NOCACHE;
   1426 			PMAP_COUNT(uncached_mappings);
   1427 		}
   1428 #endif
   1429 
   1430 		PMAP_COUNT(managed_mappings);
   1431 	} else if (mdpp) {
   1432 #ifdef __HAVE_PMAP_PV_TRACK
   1433 		pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
   1434 
   1435 		PMAP_COUNT(pvtracked_mappings);
   1436 #endif
   1437 	} else if (is_efirt_pmap_p) {
   1438 		PMAP_COUNT(efirt_mappings);
   1439 	} else {
   1440 		/*
   1441 		 * Assumption: if it is not part of our managed memory
   1442 		 * then it must be device memory which may be volatile.
   1443 		 */
   1444 		if ((flags & PMAP_CACHE_MASK) == 0)
   1445 			flags |= PMAP_NOCACHE;
   1446 		PMAP_COUNT(unmanaged_mappings);
   1447 	}
   1448 
   1449 	KASSERTMSG(mdpg == NULL || mdpp == NULL || is_efirt_pmap_p,
   1450 	    "mdpg %p mdpp %p efirt %s", mdpg, mdpp,
   1451 	    is_efirt_pmap_p ? "true" : "false");
   1452 
   1453 	struct vm_page_md *md = (mdpg != NULL) ? mdpg : mdpp;
   1454 	pt_entry_t npte = is_efirt_pmap_p ?
   1455 	    pte_make_enter_efirt(pa, prot, flags) :
   1456 	    pte_make_enter(pa, md, prot, flags, is_kernel_pmap_p);
   1457 
   1458 	kpreempt_disable();
   1459 
   1460 	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags);
   1461 	if (__predict_false(ptep == NULL)) {
   1462 		kpreempt_enable();
   1463 		UVMHIST_LOG(*histp, " <-- ENOMEM", 0, 0, 0, 0);
   1464 		return ENOMEM;
   1465 	}
   1466 	const pt_entry_t opte = *ptep;
   1467 	const bool resident = pte_valid_p(opte);
   1468 	bool remap = false;
   1469 	if (resident) {
   1470 		if (pte_to_paddr(opte) != pa) {
   1471 			KASSERT(!is_kernel_pmap_p);
   1472 			const pt_entry_t rpte = pte_nv_entry(false);
   1473 
   1474 			pmap_addr_range_check(pmap, va, va + NBPG, __func__);
   1475 			pmap_pte_process(pmap, va, va + NBPG, pmap_pte_remove,
   1476 			    rpte);
   1477 			PMAP_COUNT(user_mappings_changed);
   1478 			remap = true;
   1479 		}
   1480 		update_flags |= PMAP_TLB_NEED_IPI;
   1481 	}
   1482 
   1483 	if (!resident || remap) {
   1484 		pmap->pm_stats.resident_count++;
   1485 	}
   1486 
   1487 	/* Done after case that may sleep/return. */
   1488 	if (md)
   1489 		pmap_enter_pv(pmap, va, pa, md, &npte, 0);
   1490 
   1491 	/*
   1492 	 * Now validate mapping with desired protection/wiring.
   1493 	 */
   1494 	if (wired) {
   1495 		pmap->pm_stats.wired_count++;
   1496 		npte = pte_wire_entry(npte);
   1497 	}
   1498 
   1499 	UVMHIST_LOG(*histp, "new pte %#jx (pa %#jx)",
   1500 	    pte_value(npte), pa, 0, 0);
   1501 
   1502 	KASSERT(pte_valid_p(npte));
   1503 
   1504 	pmap_tlb_miss_lock_enter();
   1505 	pte_set(ptep, npte);
   1506 	pmap_tlb_update_addr(pmap, va, npte, update_flags);
   1507 	pmap_tlb_miss_lock_exit();
   1508 	kpreempt_enable();
   1509 
   1510 	if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) {
   1511 		KASSERT(mdpg != NULL);
   1512 		PMAP_COUNT(exec_mappings);
   1513 		if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) {
   1514 			if (!pte_deferred_exec_p(npte)) {
   1515 				UVMHIST_LOG(*histp, "va=%#jx pg %#jx: "
   1516 				    "immediate syncicache",
   1517 				    va, (uintptr_t)pg, 0, 0);
   1518 				pmap_page_syncicache(pg);
   1519 				pmap_page_set_attributes(mdpg,
   1520 				    VM_PAGEMD_EXECPAGE);
   1521 				PMAP_COUNT(exec_synced_mappings);
   1522 			} else {
   1523 				UVMHIST_LOG(*histp, "va=%#jx pg %#jx: defer "
   1524 				    "syncicache: pte %#jx",
   1525 				    va, (uintptr_t)pg, npte, 0);
   1526 			}
   1527 		} else {
   1528 			UVMHIST_LOG(*histp,
   1529 			    "va=%#jx pg %#jx: no syncicache cached %jd",
   1530 			    va, (uintptr_t)pg, pte_cached_p(npte), 0);
   1531 		}
   1532 	} else if (pg != NULL && (prot & VM_PROT_EXECUTE)) {
   1533 		KASSERT(mdpg != NULL);
   1534 		KASSERT(prot & VM_PROT_WRITE);
   1535 		PMAP_COUNT(exec_mappings);
   1536 		pmap_page_syncicache(pg);
   1537 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1538 		UVMHIST_LOG(*histp,
   1539 		    "va=%#jx pg %#jx: immediate syncicache (writeable)",
   1540 		    va, (uintptr_t)pg, 0, 0);
   1541 	}
   1542 
   1543 	UVMHIST_LOG(*histp, " <-- 0 (OK)", 0, 0, 0, 0);
   1544 	return 0;
   1545 }
   1546 
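/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * entering a wired, writable mapping into a hypothetical pmap "pm".  The
 * flags argument carries the access type plus PMAP_WIRED; callers follow up
 * with pmap_update() before relying on the mapping.
 */
#if 0	/* example only, never compiled */
	int error = pmap_enter(pm, va, pa, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
	if (error == 0)
		pmap_update(pm);
#endif
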
   1547 void
   1548 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1549 {
   1550 	pmap_t pmap = pmap_kernel();
   1551 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1552 	struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
   1553 
   1554 	UVMHIST_FUNC(__func__);
   1555 	UVMHIST_CALLARGS(pmaphist, "(va=%#jx pa=%#jx prot=%ju, flags=%#jx)",
   1556 	    va, pa, prot, flags);
   1557 	PMAP_COUNT(kenter_pa);
   1558 
   1559 	if (mdpg == NULL) {
   1560 		PMAP_COUNT(kenter_pa_unmanaged);
   1561 		if ((flags & PMAP_CACHE_MASK) == 0)
   1562 			flags |= PMAP_NOCACHE;
   1563 	} else {
   1564 		if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va))
   1565 			PMAP_COUNT(kenter_pa_bad);
   1566 	}
   1567 
   1568 	pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags);
   1569 	kpreempt_disable();
   1570 	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, 0);
   1571 
   1572 	KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
   1573 	    pmap_limits.virtual_end);
   1574 	KASSERT(!pte_valid_p(*ptep));
   1575 
   1576 	/*
    1577 	 * No need to track unmanaged pages or PMAP_KMPAGE pages for aliases
   1578 	 */
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
	if (pg != NULL && (flags & PMAP_KMPAGE) == 0
	    && pmap_md_virtual_cache_aliasing_p()) {
		pmap_enter_pv(pmap, va, pa, mdpg, &npte, PV_KENTER);
	}
#endif

	/*
	 * We have the option to force this mapping into the TLB but we
	 * don't.  Instead let the next reference to the page do it.
	 */
	pmap_tlb_miss_lock_enter();
	pte_set(ptep, npte);
	pmap_tlb_update_addr(pmap_kernel(), va, npte, 0);
	pmap_tlb_miss_lock_exit();
	kpreempt_enable();
#if DEBUG > 1
	for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) {
		if (((long *)va)[i] != ((long *)pa)[i])
			panic("%s: contents (%lx) of va %#"PRIxVADDR
			    " != contents (%lx) of pa %#"PRIxPADDR, __func__,
			    ((long *)va)[i], va, ((long *)pa)[i], pa);
	}
#endif

	UVMHIST_LOG(pmaphist, " <-- done (ptep=%#jx)", (uintptr_t)ptep, 0, 0,
	    0);
}
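
/*
 * Hypothetical usage sketch (not from this file): entering an uncached
 * device page into kernel VA space and tearing it down again.  The
 * name dev_pa is illustrative only.
 *
 *	vaddr_t va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 *	    UVM_KMF_VAONLY);
 *	pmap_kenter_pa(va, dev_pa, VM_PROT_READ | VM_PROT_WRITE,
 *	    PMAP_NOCACHE);
 *	...access the device through va...
 *	pmap_kremove(va, PAGE_SIZE);
 *	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_VAONLY);
 */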

/*
 *	Remove the given range of addresses from the kernel map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */

static bool
pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
	uintptr_t flags)
{
	const pt_entry_t new_pte = pte_nv_entry(true);

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, sva=%#jx eva=%#jx ptep=%#jx)",
	    (uintptr_t)pmap, sva, eva, (uintptr_t)ptep);

	KASSERT(kpreempt_disabled());

	for (; sva < eva; sva += NBPG, ptep++) {
		pt_entry_t pte = *ptep;
		if (!pte_valid_p(pte))
			continue;

		PMAP_COUNT(kremove_pages);
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
		if (pg != NULL && pmap_md_virtual_cache_aliasing_p()) {
			pmap_remove_pv(pmap, sva, pg, !pte_readonly_p(pte));
		}
#endif

		pmap_tlb_miss_lock_enter();
		pte_set(ptep, new_pte);
		pmap_tlb_invalidate_addr(pmap, sva);
		pmap_tlb_miss_lock_exit();
	}

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);

	return false;
}

void
pmap_kremove(vaddr_t va, vsize_t len)
{
	const vaddr_t sva = trunc_page(va);
	const vaddr_t eva = round_page(va + len);

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(va=%#jx len=%#jx)", va, len, 0, 0);

	kpreempt_disable();
	pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}
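
/*
 * Note that pmap_kremove() delegates the PTE walk to the generic
 * pmap_pte_process(), which calls a per-range callback such as
 * pmap_pte_kremove() above.  A minimal sketch of such a callback
 * (my_pte_callback is a hypothetical name; the signature and the
 * "return false" convention follow pmap_pte_kremove()):
 *
 *	static bool
 *	my_pte_callback(pmap_t pm, vaddr_t sva, vaddr_t eva,
 *	    pt_entry_t *ptep, uintptr_t flags)
 *	{
 *		for (; sva < eva; sva += NBPG, ptep++) {
 *			// examine or rewrite *ptep for the page at sva
 *		}
 *		return false;
 *	}
 */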

bool
pmap_remove_all(struct pmap *pmap)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pm=%#jx)", (uintptr_t)pmap, 0, 0, 0);

	KASSERT(pmap != pmap_kernel());

	kpreempt_disable();
	/*
	 * Free all of our ASIDs which means we can skip doing all the
	 * tlb_invalidate_addrs().
	 */
	pmap_tlb_miss_lock_enter();
#ifdef MULTIPROCESSOR
	// This should be the last CPU with this pmap onproc
	KASSERT(!kcpuset_isotherset(pmap->pm_onproc, cpu_index(curcpu())));
	if (kcpuset_isset(pmap->pm_onproc, cpu_index(curcpu())))
#endif
		pmap_tlb_asid_deactivate(pmap);
#ifdef MULTIPROCESSOR
	KASSERT(kcpuset_iszero(pmap->pm_onproc));
#endif
	pmap_tlb_asid_release_all(pmap);
	pmap_tlb_miss_lock_exit();
	pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE;

#ifdef PMAP_FAULTINFO
	curpcb->pcb_faultinfo.pfi_faultaddr = 0;
	curpcb->pcb_faultinfo.pfi_repeats = 0;
	curpcb->pcb_faultinfo.pfi_faultptep = NULL;
#endif
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
	return false;
}
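
/*
 * Design note on the function above: releasing every ASID held by the
 * pmap invalidates all of its TLB entries at once (stale entries can
 * no longer match once the address space is assigned a fresh ASID),
 * so the per-page tlb_invalidate_addr() calls are unnecessary.
 * Setting PMAP_DEFERRED_ACTIVATE makes the pmap re-activate itself
 * (and acquire a new ASID) later, rather than immediately.
 */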

/*
 *	Routine:	pmap_unwire
 *	Function:	Clear the wired attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_unwire(pmap_t pmap, vaddr_t va)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx)", (uintptr_t)pmap, va,
	    0, 0);
	PMAP_COUNT(unwire);

	/*
	 * Don't need to flush the TLB since PG_WIRED is only in software.
	 */
	kpreempt_disable();
	pmap_addr_range_check(pmap, va, va, __func__);
	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
	KASSERTMSG(ptep != NULL, "pmap %p va %#"PRIxVADDR" invalid STE",
	    pmap, va);
	pt_entry_t pte = *ptep;
	KASSERTMSG(pte_valid_p(pte),
	    "pmap %p va %#" PRIxVADDR " invalid PTE %#" PRIxPTE " @ %p",
	    pmap, va, pte_value(pte), ptep);

	if (pte_wired_p(pte)) {
		pmap_tlb_miss_lock_enter();
		pte_set(ptep, pte_unwire_entry(pte));
		pmap_tlb_miss_lock_exit();
		pmap->pm_stats.wired_count--;
	}
#ifdef DIAGNOSTIC
	else {
		printf("%s: wiring for pmap %p va %#"PRIxVADDR" unchanged!\n",
		    __func__, pmap, va);
	}
#endif
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
bool
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	paddr_t pa;

	if (pmap == pmap_kernel()) {
		if (pmap_md_direct_mapped_vaddr_p(va)) {
			pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
			goto done;
		}
		if (pmap_md_io_vaddr_p(va))
			panic("pmap_extract: io address %#"PRIxVADDR"", va);

		if (va >= pmap_limits.virtual_end)
			panic("%s: illegal kernel mapped address %#"PRIxVADDR,
			    __func__, va);
	}
	kpreempt_disable();
	const pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
	if (ptep == NULL || !pte_valid_p(*ptep)) {
		kpreempt_enable();
		return false;
	}
	pa = pte_to_paddr(*ptep) | (va & PGOFSET);
	kpreempt_enable();
done:
	if (pap != NULL) {
		*pap = pa;
	}
	return true;
}
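
/*
 * Hypothetical usage sketch (not from this file): translating a kernel
 * virtual address to its physical address.
 *
 *	paddr_t pa;
 *	if (!pmap_extract(pmap_kernel(), va, &pa))
 *		panic("no mapping for va %#"PRIxVADDR, va);
 *
 * For direct-mapped kernel addresses the fast path at the top of the
 * function answers without any PTE lookup.
 */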

/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
    vaddr_t src_addr)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLED(pmaphist);
	PMAP_COUNT(copy);
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
bool
pmap_clear_reference(struct vm_page *pg)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx))",
	   (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);

	bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);

	UVMHIST_LOG(pmaphist, " <-- wasref %ju", rv, 0, 0, 0);

	return rv;
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
bool
pmap_is_referenced(struct vm_page *pg)
{
	return VM_PAGEMD_REFERENCED_P(VM_PAGE_TO_MD(pg));
}

/*
 *	Clear the modify bits on the specified physical page.
 */
bool
pmap_clear_modify(struct vm_page *pg)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv = &mdpg->mdpg_first;
	pv_entry_t pv_next;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (%#jx))",
	    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);
	PMAP_COUNT(clear_modify);

	if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
		if (pv->pv_pmap == NULL) {
			UVMHIST_LOG(pmapexechist,
			    "pg %#jx (pa %#jx): execpage cleared",
			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
			PMAP_COUNT(exec_uncached_clear_modify);
		} else {
			UVMHIST_LOG(pmapexechist,
			    "pg %#jx (pa %#jx): syncicache performed",
			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
			pmap_page_syncicache(pg);
			PMAP_COUNT(exec_synced_clear_modify);
		}
	}
	if (!pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED)) {
		UVMHIST_LOG(pmaphist, " <-- false", 0, 0, 0, 0);
		return false;
	}
	if (pv->pv_pmap == NULL) {
		UVMHIST_LOG(pmaphist, " <-- true (no mappings)", 0, 0, 0, 0);
		return true;
	}

	/*
	 * Remove write access from any mappings of this page so we can
	 * tell if it is written to again later.  Flush the VAC first if
	 * there is one.
	 */
	kpreempt_disable();
	VM_PAGEMD_PVLIST_READLOCK(mdpg);
	pmap_pvlist_check(mdpg);
	for (; pv != NULL; pv = pv_next) {
		pmap_t pmap = pv->pv_pmap;
		vaddr_t va = trunc_page(pv->pv_va);

		pv_next = pv->pv_next;
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		if (PV_ISKENTER_P(pv))
			continue;
#endif
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		KASSERT(ptep);
		pt_entry_t pte = pte_prot_nowrite(*ptep);
		if (*ptep == pte) {
			continue;
		}
		KASSERT(pte_valid_p(pte));
		const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
		pmap_tlb_miss_lock_enter();
		pte_set(ptep, pte);
		pmap_tlb_invalidate_addr(pmap, va);
		pmap_tlb_miss_lock_exit();
		pmap_update(pmap);
		if (__predict_false(gen != VM_PAGEMD_PVLIST_READLOCK(mdpg))) {
			/*
			 * The list changed!  So restart from the beginning.
			 */
			pv_next = &mdpg->mdpg_first;
			pmap_pvlist_check(mdpg);
		}
	}
	pmap_pvlist_check(mdpg);
	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- true (mappings changed)", 0, 0, 0, 0);
	return true;
}
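
/*
 * The loop above uses a generation-count relock pattern: the pv list
 * lock is dropped around the TLB shootdown (pmap_update() may have to
 * interact with other CPUs), and the generation returned at unlock
 * time is compared with the one seen when the lock is retaken.  A
 * sketch of the pattern, as used here:
 *
 *	const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 *	...work that cannot be done holding the lock...
 *	if (gen != VM_PAGEMD_PVLIST_READLOCK(mdpg)) {
 *		// the list changed underneath us: restart the scan
 *	}
 */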

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
bool
pmap_is_modified(struct vm_page *pg)
{
	return VM_PAGEMD_MODIFIED_P(VM_PAGE_TO_MD(pg));
}

/*
 *	pmap_set_modified:
 *
 *	Set the modified and referenced attributes for the specified page.
 */
void
pmap_set_modified(paddr_t pa)
{
	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pmap_page_set_attributes(mdpg,
	    VM_PAGEMD_MODIFIED | VM_PAGEMD_REFERENCED);
}

/******************** pv_entry management ********************/

static void
pmap_pvlist_check(struct vm_page_md *mdpg)
{
#ifdef DEBUG
	pv_entry_t pv = &mdpg->mdpg_first;
	if (pv->pv_pmap != NULL) {
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		const u_int colormask = uvmexp.colormask;
		u_int colors = 0;
#endif
		for (; pv != NULL; pv = pv->pv_next) {
			KASSERT(pv->pv_pmap != pmap_kernel() ||
			    !pmap_md_direct_mapped_vaddr_p(pv->pv_va));
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
			colors |= __BIT(atop(pv->pv_va) & colormask);
#endif
		}
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		// Assert that if more than one color is mapped, the
		// page is uncached.
		KASSERTMSG(!pmap_md_virtual_cache_aliasing_p()
		    || colors == 0 || (colors & (colors - 1)) == 0
		    || VM_PAGEMD_UNCACHED_P(mdpg), "colors=%#x uncached=%u",
		    colors, VM_PAGEMD_UNCACHED_P(mdpg));
#endif
	} else {
		KASSERT(pv->pv_next == NULL);
	}
#endif /* DEBUG */
}
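
/*
 * The (colors & (colors - 1)) == 0 test above is the usual "at most
 * one bit set" check.  Worked example: with mappings of two colors,
 * say colors == 0b0101, colors - 1 == 0b0100 and the AND is nonzero,
 * so the page must be uncached; with one color, colors == 0b0100,
 * colors - 1 == 0b0011 and the AND is zero, so caching is fine.
 */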

/*
 * Enter the pmap and virtual address into the
 * physical to virtual map table.
 */
void
pmap_enter_pv(pmap_t pmap, vaddr_t va, paddr_t pa, struct vm_page_md *mdpg,
    pt_entry_t *nptep, u_int flags)
{
	pv_entry_t pv, npv, apv;
#ifdef UVMHIST
	bool first = false;
	struct vm_page *pg = VM_PAGEMD_VMPAGE_P(mdpg) ? VM_MD_TO_PAGE(mdpg) :
	    NULL;
#endif

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx va=%#jx pg=%#jx (%#jx)",
	    (uintptr_t)pmap, va, (uintptr_t)pg, pa);
	UVMHIST_LOG(pmaphist, "nptep=%#jx (%#jx))",
	    (uintptr_t)nptep, pte_value(*nptep), 0, 0);

	KASSERT(kpreempt_disabled());
	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
	KASSERTMSG(pmap != pmap_kernel() || !pmap_md_io_vaddr_p(va),
	    "va %#"PRIxVADDR, va);

	apv = NULL;
	VM_PAGEMD_PVLIST_LOCK(mdpg);
again:
	pv = &mdpg->mdpg_first;
	pmap_pvlist_check(mdpg);
	if (pv->pv_pmap == NULL) {
		KASSERT(pv->pv_next == NULL);
		/*
		 * No entries yet, use header as the first entry
		 */
		PMAP_COUNT(primary_mappings);
		PMAP_COUNT(mappings);
#ifdef UVMHIST
		first = true;
#endif
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		KASSERT(VM_PAGEMD_CACHED_P(mdpg));
		// If the new mapping has an incompatible color with the
		// last mapping of this page, clean the page before using it.
		if (!PMAP_PAGE_COLOROK_P(va, pv->pv_va)) {
			pmap_md_vca_clean(mdpg, PMAP_WBINV);
		}
#endif
		pv->pv_pmap = pmap;
		pv->pv_va = va | flags;
	} else {
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		if (pmap_md_vca_add(mdpg, va, nptep)) {
			goto again;
		}
#endif

		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 *
		 * Note: the entry may already be in the table if
		 * we are only changing the protection bits.
		 */

		for (npv = pv; npv; npv = npv->pv_next) {
			if (pmap == npv->pv_pmap
			    && va == trunc_page(npv->pv_va)) {
#ifdef PARANOIADIAG
				pt_entry_t *ptep = pmap_pte_lookup(pmap, va);
				pt_entry_t pte = (ptep != NULL) ? *ptep : 0;
				if (!pte_valid_p(pte) || pte_to_paddr(pte) != pa)
					printf("%s: found va %#"PRIxVADDR
					    " pa %#"PRIxPADDR
					    " in pv_table but != %#"PRIxPTE"\n",
					    __func__, va, pa, pte_value(pte));
#endif
				PMAP_COUNT(remappings);
				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
				if (__predict_false(apv != NULL))
					pmap_pv_free(apv);

				UVMHIST_LOG(pmaphist,
				    " <-- done pv=%#jx (reused)",
				    (uintptr_t)pv, 0, 0, 0);
				return;
			}
		}
		if (__predict_true(apv == NULL)) {
			/*
			 * To allocate a PV, we have to release the PVLIST lock
			 * so get the page generation.  We allocate the PV, and
			 * then reacquire the lock.
			 */
			pmap_pvlist_check(mdpg);
			const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);

			apv = (pv_entry_t)pmap_pv_alloc();
			if (apv == NULL)
				panic("pmap_enter_pv: pmap_pv_alloc() failed");

			/*
			 * If the generation has changed, then someone else
			 * tinkered with this page so we should start over.
			 */
			if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg))
				goto again;
		}
		npv = apv;
		apv = NULL;
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		/*
		 * If we need to deal with virtual cache aliases, keep
		 * mappings in the kernel pmap at the head of the list.
		 * This allows the VCA code to easily use them for cache
		 * operations if present.
		 */
		pmap_t kpmap = pmap_kernel();
		if (pmap != kpmap) {
			while (pv->pv_pmap == kpmap && pv->pv_next != NULL) {
				pv = pv->pv_next;
			}
		}
#endif
		npv->pv_va = va | flags;
		npv->pv_pmap = pmap;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
		PMAP_COUNT(mappings);
	}
	pmap_pvlist_check(mdpg);
	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	if (__predict_false(apv != NULL))
		pmap_pv_free(apv);

	UVMHIST_LOG(pmaphist, " <-- done pv=%#jx (first %ju)", (uintptr_t)pv,
	    first, 0, 0);
}
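
/*
 * Informal picture of the pv list maintained above: the first pv_entry
 * is embedded in the vm_page_md itself, with pv_pmap == NULL denoting
 * an empty list; further mappings are allocated from the pv pool and
 * chained behind it.  The low bits of pv_va carry flags (e.g.
 * PV_KENTER), which is why lookups compare against trunc_page(pv_va).
 *
 *	vm_page_md.mdpg_first --> { pmap, va|flags, next }
 *	                                            |
 *	                                            v
 *	                          { pmap', va'|flags', NULL }
 *
 * With PMAP_VIRTUAL_CACHE_ALIASES, kernel-pmap entries are kept at the
 * head so the VCA code can find a kernel mapping quickly.
 */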

/*
 * Remove a physical to virtual address translation.
 * If cache was inhibited on this page, and there are no more cache
 * conflicts, restore caching.
 * Flush the cache if the last mapping is removed (the page should
 * always be cached at this point).
 */
void
pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv, npv;
	bool last;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx, pg=%#jx (pa %#jx)",
	    (uintptr_t)pmap, va, (uintptr_t)pg, VM_PAGE_TO_PHYS(pg));
	UVMHIST_LOG(pmaphist, "dirty=%ju)", dirty, 0, 0, 0);

	KASSERT(kpreempt_disabled());
	KASSERT((va & PAGE_MASK) == 0);
	pv = &mdpg->mdpg_first;

	VM_PAGEMD_PVLIST_LOCK(mdpg);
	pmap_pvlist_check(mdpg);

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */

	last = false;
	if (pmap == pv->pv_pmap && va == trunc_page(pv->pv_va)) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			KASSERT(pv->pv_pmap != NULL);
		} else {
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
			pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
#endif
			pv->pv_pmap = NULL;
			last = true;	/* Last mapping removed */
		}
		PMAP_COUNT(remove_pvfirst);
	} else {
		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
			PMAP_COUNT(remove_pvsearch);
			if (pmap == npv->pv_pmap && va == trunc_page(npv->pv_va))
				break;
		}
		if (npv) {
			pv->pv_next = npv->pv_next;
		}
	}

	pmap_pvlist_check(mdpg);
	VM_PAGEMD_PVLIST_UNLOCK(mdpg);

#ifdef PMAP_VIRTUAL_CACHE_ALIASES
	pmap_md_vca_remove(pg, va, dirty, last);
#endif

	/*
	 * Free the pv_entry if needed.
	 */
	if (npv)
		pmap_pv_free(npv);
	if (VM_PAGEMD_EXECPAGE_P(mdpg) && dirty) {
		if (last) {
			/*
			 * If this was the page's last mapping, we no longer
			 * care about its execness.
			 */
			UVMHIST_LOG(pmapexechist,
			    "pg %#jx (pa %#jx) last %ju: execpage cleared",
			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
			PMAP_COUNT(exec_uncached_remove);
		} else {
			/*
			 * Someone still has it mapped as an executable page
			 * so we must sync it.
			 */
			UVMHIST_LOG(pmapexechist,
			    "pg %#jx (pa %#jx) last %ju: performed syncicache",
			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
			pmap_page_syncicache(pg);
			PMAP_COUNT(exec_synced_remove);
		}
	}

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

#if defined(MULTIPROCESSOR)
struct pmap_pvlist_info {
	kmutex_t *pli_locks[PAGE_SIZE / 32];
	volatile u_int pli_lock_refs[PAGE_SIZE / 32];
	volatile u_int pli_lock_index;
	u_int pli_lock_mask;
} pmap_pvlist_info;

void
pmap_pvlist_lock_init(size_t cache_line_size)
{
	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
	const vaddr_t lock_page = uvm_pageboot_alloc(PAGE_SIZE);
	vaddr_t lock_va = lock_page;
	if (sizeof(kmutex_t) > cache_line_size) {
		cache_line_size = roundup2(sizeof(kmutex_t), cache_line_size);
	}
	const size_t nlocks = PAGE_SIZE / cache_line_size;
	KASSERT((nlocks & (nlocks - 1)) == 0);
	/*
	 * Now divide the page into a number of mutexes, one per cacheline.
	 */
	for (size_t i = 0; i < nlocks; lock_va += cache_line_size, i++) {
		kmutex_t * const lock = (kmutex_t *)lock_va;
		mutex_init(lock, MUTEX_DEFAULT, IPL_HIGH);
		pli->pli_locks[i] = lock;
	}
	pli->pli_lock_mask = nlocks - 1;
}
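
/*
 * Worked example of the sizing above, with hypothetical values: for
 * PAGE_SIZE == 4096 and cache_line_size == 64, nlocks == 64 and
 * pli_lock_mask == 0x3f, and each mutex occupies its own cache line,
 * which avoids false sharing between unrelated pv-list locks.
 */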

kmutex_t *
pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
{
	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
	kmutex_t *lock = mdpg->mdpg_lock;

	/*
	 * Allocate a lock on an as-needed basis.  This will hopefully give
	 * us a semi-random distribution not based on page color.
	 */
	if (__predict_false(lock == NULL)) {
		size_t locknum = atomic_add_int_nv(&pli->pli_lock_index, 37);
		size_t lockid = locknum & pli->pli_lock_mask;
		kmutex_t * const new_lock = pli->pli_locks[lockid];
		/*
		 * Set the lock.  If some other thread already did, just use
		 * the one they assigned.
		 */
		lock = atomic_cas_ptr(&mdpg->mdpg_lock, NULL, new_lock);
		if (lock == NULL) {
			lock = new_lock;
			atomic_inc_uint(&pli->pli_lock_refs[lockid]);
		}
	}

	/*
	 * Now finally provide the lock.
	 */
	return lock;
}
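
/*
 * Why a stride of 37 above: nlocks is a power of two and 37 is odd, so
 * gcd(37, nlocks) == 1 and successive allocations cycle through every
 * lock before repeating.  For example, with nlocks == 8 the sequence
 * of (n * 37) & 7 is 0, 5, 2, 7, 4, 1, 6, 3.  Any odd stride would
 * work; 37 merely scatters neighboring allocations further apart.
 */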
#else /* !MULTIPROCESSOR */
void
pmap_pvlist_lock_init(size_t cache_line_size)
{
	mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_HIGH);
}

#ifdef MODULAR
kmutex_t *
pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
{
	/*
	 * We just use a global lock.
	 */
	if (__predict_false(mdpg->mdpg_lock == NULL)) {
		mdpg->mdpg_lock = &pmap_pvlist_mutex;
	}

	/*
	 * Now finally provide the lock.
	 */
	return mdpg->mdpg_lock;
}
#endif /* MODULAR */
#endif /* !MULTIPROCESSOR */

/*
 * pmap_pv_page_alloc:
 *
 *	Allocate a page for the pv_entry pool.
 */
void *
pmap_pv_page_alloc(struct pool *pp, int flags)
{
	struct vm_page * const pg = pmap_md_alloc_poolpage(UVM_PGA_USERESERVE);
	if (pg == NULL)
		return NULL;

	return (void *)pmap_md_map_poolpage(VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
}

/*
 * pmap_pv_page_free:
 *
 *	Free a pv_entry pool page.
 */
void
pmap_pv_page_free(struct pool *pp, void *v)
{
	vaddr_t va = (vaddr_t)v;

	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
	kpreempt_disable();
	pmap_md_vca_remove(pg, va, true, true);
	kpreempt_enable();
#endif
	pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg)));
	uvm_pagefree(pg);
}

#ifdef PMAP_PREFER
/*
 * Find first virtual address >= *vap that doesn't cause
 * a cache alias conflict.
 */
void
pmap_prefer(vaddr_t foff, vaddr_t *vap, vsize_t sz, int td)
{
	vsize_t prefer_mask = ptoa(uvmexp.colormask);

	PMAP_COUNT(prefer_requests);

	prefer_mask |= pmap_md_cache_prefer_mask();

	if (prefer_mask) {
		vaddr_t	va = *vap;
		vsize_t d = (foff - va) & prefer_mask;
		if (d) {
			if (td)
				*vap = trunc_page(va - ((-d) & prefer_mask));
			else
				*vap = round_page(va + d);
			PMAP_COUNT(prefer_adjustments);
		}
	}
}
#endif /* PMAP_PREFER */
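
/*
 * Worked example of the adjustment above, with hypothetical values:
 * take prefer_mask == 0x7000 (eight 4 KiB colors), foff == 0x3000 and
 * *vap == 0x20001000.  Then d = (foff - va) & prefer_mask == 0x2000,
 * and rounding up yields *vap == 0x20003000, whose color bits
 * (va & 0x7000 == 0x3000) now match those of foff, so the new mapping
 * lands on the same cache color as the object's preferred address.
 */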

#ifdef PMAP_MAP_POOLPAGE
vaddr_t
pmap_map_poolpage(paddr_t pa)
{
	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg);

	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));

	pmap_page_set_attributes(mdpg, VM_PAGEMD_POOLPAGE);

	return pmap_md_map_poolpage(pa, NBPG);
}

paddr_t
pmap_unmap_poolpage(vaddr_t va)
{
	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
	paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);

	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg)));

	pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
	pmap_md_unmap_poolpage(va, NBPG);

	return pa;
}
#endif /* PMAP_MAP_POOLPAGE */

#ifdef DDB
void
pmap_db_mdpg_print(struct vm_page *pg,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv = &mdpg->mdpg_first;

	if (pv->pv_pmap == NULL) {
		pr(" no mappings\n");
		return;
	}

	int lcount = 0;
	if (VM_PAGEMD_VMPAGE_P(mdpg)) {
		pr(" vmpage");
		lcount++;
	}
	if (VM_PAGEMD_POOLPAGE_P(mdpg)) {
		if (lcount != 0)
			pr(",");
		pr(" pool");
		lcount++;
	}
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
	if (VM_PAGEMD_UNCACHED_P(mdpg)) {
		if (lcount != 0)
			pr(",");
		pr(" uncached");
		lcount++;
	}
#endif
	pr("\n");

	lcount = 0;
	if (VM_PAGEMD_REFERENCED_P(mdpg)) {
		pr(" referenced");
		lcount++;
	}
	if (VM_PAGEMD_MODIFIED_P(mdpg)) {
		if (lcount != 0)
			pr(",");
		pr(" modified");
		lcount++;
	}
	if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
		if (lcount != 0)
			pr(",");
		pr(" exec");
		lcount++;
	}
	pr("\n");

	for (size_t i = 0; pv != NULL; pv = pv->pv_next) {
		pr("  pv[%zu] pv=%p\n", i, pv);
		pr("    pv[%zu].pv_pmap = %p\n", i, pv->pv_pmap);
		pr("    pv[%zu].pv_va   = %" PRIxVADDR " (kenter=%s)\n",
		    i, trunc_page(pv->pv_va), PV_ISKENTER_P(pv) ? "true" : "false");
		i++;
	}
}

void
pmap_db_pmap_print(struct pmap *pm,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
#if defined(PMAP_HWPAGEWALKER)
	pr(" pm_pdetab     = %p\n", pm->pm_pdetab);
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	pr(" pm_segtab     = %p\n", pm->pm_segtab);
#endif

	pmap_db_tlb_print(pm, pr);
}
#endif /* DDB */