pmap.c revision 1.12
      1 /*	$NetBSD: pmap.c,v 1.12 2015/06/11 05:27:07 matt Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center and by Chris G. Demetriou.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 /*
     34  * Copyright (c) 1992, 1993
     35  *	The Regents of the University of California.  All rights reserved.
     36  *
     37  * This code is derived from software contributed to Berkeley by
     38  * the Systems Programming Group of the University of Utah Computer
     39  * Science Department and Ralph Campbell.
     40  *
     41  * Redistribution and use in source and binary forms, with or without
     42  * modification, are permitted provided that the following conditions
     43  * are met:
     44  * 1. Redistributions of source code must retain the above copyright
     45  *    notice, this list of conditions and the following disclaimer.
     46  * 2. Redistributions in binary form must reproduce the above copyright
     47  *    notice, this list of conditions and the following disclaimer in the
     48  *    documentation and/or other materials provided with the distribution.
     49  * 3. Neither the name of the University nor the names of its contributors
     50  *    may be used to endorse or promote products derived from this software
     51  *    without specific prior written permission.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     63  * SUCH DAMAGE.
     64  *
     65  *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
     66  */
     67 
     68 #include <sys/cdefs.h>
     69 
     70 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.12 2015/06/11 05:27:07 matt Exp $");
     71 
     72 /*
     73  *	Manages physical address maps.
     74  *
     75  *	In addition to hardware address maps, this
     76  *	module is called upon to provide software-use-only
     77  *	maps which may or may not be stored in the same
     78  *	form as hardware maps.  These pseudo-maps are
     79  *	used to store intermediate results from copy
     80  *	operations to and from address spaces.
     81  *
     82  *	Since the information managed by this module is
     83  *	also stored by the logical address mapping module,
     84  *	this module may throw away valid virtual-to-physical
     85  *	mappings at almost any time.  However, invalidations
     86  *	of virtual-to-physical mappings must be done as
     87  *	requested.
     88  *
     89  *	In order to cope with hardware architectures which
     90  *	make virtual-to-physical map invalidates expensive,
     91  *	this module may delay invalidate or reduced protection
     92  *	operations until such time as they are actually
     93  *	necessary.  This module is given full information as
     94  *	to which processors are currently using which maps,
     95  *	and to when physical maps must be made correct.
     96  */
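/*
 * For orientation, a sketch of the caller-side contract described above.
 * The real callers live in the machine-independent UVM code; the snippet
 * below is illustrative only:
 *
 *	// establish a mapping, then commit any deferred TLB/ASID work
 *	error = pmap_enter(map, va, pa, VM_PROT_READ|VM_PROT_WRITE, 0);
 *	...
 *	pmap_update(map);	// must be called before the mapping is relied on
 */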
     97 
     98 #include "opt_modular.h"
     99 #include "opt_multiprocessor.h"
    100 #include "opt_sysv.h"
    101 
    102 #define __PMAP_PRIVATE
    103 
    104 #include <sys/param.h>
    105 #include <sys/systm.h>
    106 #include <sys/proc.h>
    107 #include <sys/buf.h>
    108 #include <sys/pool.h>
    109 #include <sys/atomic.h>
    110 #include <sys/mutex.h>
    112 #ifdef SYSVSHM
    113 #include <sys/shm.h>
    114 #endif
    115 #include <sys/socketvar.h>	/* XXX: for sock_loan_thresh */
    116 
    117 #include <uvm/uvm.h>
    118 
    119 #define	PMAP_COUNT(name)	(pmap_evcnt_##name.ev_count++ + 0)
    120 #define PMAP_COUNTER(name, desc) \
    121 static struct evcnt pmap_evcnt_##name = \
    122 	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \
    123 EVCNT_ATTACH_STATIC(pmap_evcnt_##name)
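/*
 * As an example, PMAP_COUNTER(mappings, "pages mapped") below expands
 * (roughly) to:
 *
 *	static struct evcnt pmap_evcnt_mappings =
 *	    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", "pages mapped");
 *	EVCNT_ATTACH_STATIC(pmap_evcnt_mappings);
 *
 * and PMAP_COUNT(mappings) then just bumps pmap_evcnt_mappings.ev_count,
 * which is visible with vmstat -e.
 */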
    124 
    125 PMAP_COUNTER(remove_kernel_calls, "remove kernel calls");
    126 PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped");
    127 PMAP_COUNTER(remove_user_calls, "remove user calls");
    128 PMAP_COUNTER(remove_user_pages, "user pages unmapped");
    129 PMAP_COUNTER(remove_flushes, "remove cache flushes");
    130 PMAP_COUNTER(remove_tlb_ops, "remove tlb ops");
    131 PMAP_COUNTER(remove_pvfirst, "remove pv first");
    132 PMAP_COUNTER(remove_pvsearch, "remove pv search");
    133 
    134 PMAP_COUNTER(prefer_requests, "prefer requests");
    135 PMAP_COUNTER(prefer_adjustments, "prefer adjustments");
    136 
    137 PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
    138 PMAP_COUNTER(zeroed_pages, "pages zeroed");
    139 PMAP_COUNTER(copied_pages, "pages copied");
    140 
    141 PMAP_COUNTER(kenter_pa, "kernel fast mapped pages");
    142 PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)");
    143 PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages");
    144 PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages");
    145 
    146 PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable");
    147 PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable");
    148 
    149 PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)");
    150 PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)");
    151 PMAP_COUNTER(kernel_mappings, "kernel pages mapped");
    152 PMAP_COUNTER(user_mappings, "user pages mapped");
    153 PMAP_COUNTER(user_mappings_changed, "user mapping changed");
    154 PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed");
    155 PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
    156 PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
    157 PMAP_COUNTER(managed_mappings, "managed pages mapped");
    158 PMAP_COUNTER(mappings, "pages mapped");
    159 PMAP_COUNTER(remappings, "pages remapped");
    160 PMAP_COUNTER(unmappings, "pages unmapped");
    161 PMAP_COUNTER(primary_mappings, "page initial mappings");
    162 PMAP_COUNTER(primary_unmappings, "page final unmappings");
    163 PMAP_COUNTER(tlb_hit, "page mapping");
    164 
    165 PMAP_COUNTER(exec_mappings, "exec pages mapped");
    166 PMAP_COUNTER(exec_synced_mappings, "exec pages synced");
    167 PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)");
    168 PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)");
    169 PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)");
    170 PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)");
    171 PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)");
    172 PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)");
    173 PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)");
    174 PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)");
    175 PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)");
    176 
    177 PMAP_COUNTER(create, "creates");
    178 PMAP_COUNTER(reference, "references");
    179 PMAP_COUNTER(dereference, "dereferences");
    180 PMAP_COUNTER(destroy, "destroyed");
    181 PMAP_COUNTER(activate, "activations");
    182 PMAP_COUNTER(deactivate, "deactivations");
    183 PMAP_COUNTER(update, "updates");
    184 #ifdef MULTIPROCESSOR
    185 PMAP_COUNTER(shootdown_ipis, "shootdown IPIs");
    186 #endif
    187 PMAP_COUNTER(unwire, "unwires");
    188 PMAP_COUNTER(copy, "copies");
    189 PMAP_COUNTER(clear_modify, "clear_modifies");
    190 PMAP_COUNTER(protect, "protects");
    191 PMAP_COUNTER(page_protect, "page_protects");
    192 
    193 #define PMAP_ASID_RESERVED 0
    194 CTASSERT(PMAP_ASID_RESERVED == 0);
    195 
    196 /*
    197  * Initialize the kernel pmap.
    198  */
    199 #ifdef MULTIPROCESSOR
    200 #define	PMAP_SIZE	offsetof(struct pmap, pm_pai[PMAP_TLB_MAX])
    201 #else
    202 #define	PMAP_SIZE	sizeof(struct pmap)
    203 kmutex_t pmap_pvlist_mutex __aligned(COHERENCY_UNIT);
    204 #endif
    205 
    206 struct pmap_kernel kernel_pmap_store = {
    207 	.kernel_pmap = {
    208 		.pm_count = 1,
    209 		.pm_segtab = PMAP_INVALID_SEGTAB_ADDRESS,
    210 		.pm_minaddr = VM_MIN_KERNEL_ADDRESS,
    211 		.pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
    212 	},
    213 };
    214 
    215 struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;
    216 
    217 struct pmap_limits pmap_limits = {
    218 	.virtual_start = VM_MIN_KERNEL_ADDRESS,
    219 };
    220 
    221 #ifdef UVMHIST
    222 static struct kern_history_ent pmapexechistbuf[10000];
    223 static struct kern_history_ent pmaphistbuf[10000];
    224 UVMHIST_DEFINE(pmapexechist);
    225 UVMHIST_DEFINE(pmaphist);
    226 #endif
    227 
    228 /*
    229  * The pools from which pmap structures and sub-structures are allocated.
    230  */
    231 struct pool pmap_pmap_pool;
    232 struct pool pmap_pv_pool;
    233 
    234 #ifndef PMAP_PV_LOWAT
    235 #define	PMAP_PV_LOWAT	16
    236 #endif
    237 int		pmap_pv_lowat = PMAP_PV_LOWAT;
    238 
    239 bool		pmap_initialized = false;
    240 #define	PMAP_PAGE_COLOROK_P(a, b) \
    241 		((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0)
    242 u_int		pmap_page_colormask;
    243 
    244 #define PAGE_IS_MANAGED(pa)	\
    245 	(pmap_initialized == true && vm_physseg_find(atop(pa), NULL) != -1)
    246 
    247 #define PMAP_IS_ACTIVE(pm)						\
    248 	((pm) == pmap_kernel() || 					\
    249 	 (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)
    250 
    251 /* Forward function declarations */
    252 void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
    253 void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, u_int *);
    254 
    255 /*
    256  * PV table management functions.
    257  */
    258 void	*pmap_pv_page_alloc(struct pool *, int);
    259 void	pmap_pv_page_free(struct pool *, void *);
    260 
    261 struct pool_allocator pmap_pv_page_allocator = {
    262 	pmap_pv_page_alloc, pmap_pv_page_free, 0,
    263 };
    264 
    265 #define	pmap_pv_alloc()		pool_get(&pmap_pv_pool, PR_NOWAIT)
    266 #define	pmap_pv_free(pv)	pool_put(&pmap_pv_pool, (pv))
    267 
    268 #if !defined(MULTIPROCESSOR) || !defined(PMAP_MD_NEED_TLB_MISS_LOCK)
    269 #define	pmap_md_tlb_miss_lock_enter()	do { } while(/*CONSTCOND*/0)
    270 #define	pmap_md_tlb_miss_lock_exit()	do { } while(/*CONSTCOND*/0)
    271 #endif	/* !MULTIPROCESSOR || !PMAP_MD_NEED_TLB_MISS_LOCK */
    272 
    273 /*
    274  * Misc. functions.
    275  */
    276 
    277 bool
    278 pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes)
    279 {
    280 	volatile u_int * const attrp = &mdpg->mdpg_attrs;
    281 #ifdef MULTIPROCESSOR
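	/*
	 * On MP we must clear the bits atomically: read the current
	 * attributes, compute the new value, and retry the compare-and-swap
	 * until no other CPU has changed the word underneath us.
	 */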
    282 	for (;;) {
    283 		u_int old_attr = *attrp;
    284 		if ((old_attr & clear_attributes) == 0)
    285 			return false;
    286 		u_int new_attr = old_attr & ~clear_attributes;
    287 		if (old_attr == atomic_cas_uint(attrp, old_attr, new_attr))
    288 			return true;
    289 	}
    290 #else
    291 	u_int old_attr = *attrp;
    292 	if ((old_attr & clear_attributes) == 0)
    293 		return false;
    294 	*attrp &= ~clear_attributes;
    295 	return true;
    296 #endif
    297 }
    298 
    299 void
    300 pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes)
    301 {
    302 #ifdef MULTIPROCESSOR
    303 	atomic_or_uint(&mdpg->mdpg_attrs, set_attributes);
    304 #else
    305 	mdpg->mdpg_attrs |= set_attributes;
    306 #endif
    307 }
    308 
    309 static void
    310 pmap_page_syncicache(struct vm_page *pg)
    311 {
    312 #ifndef MULTIPROCESSOR
    313 	struct pmap * const curpmap = curcpu()->ci_curpm;
    314 #endif
    315 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
    316 	pv_entry_t pv = &mdpg->mdpg_first;
    317 	kcpuset_t *onproc;
    318 #ifdef MULTIPROCESSOR
    319 	kcpuset_create(&onproc, true);
    320 #else
    321 	onproc = NULL;
    322 #endif
    323 	(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
    324 
    325 	if (pv->pv_pmap != NULL) {
    326 		for (; pv != NULL; pv = pv->pv_next) {
    327 #ifdef MULTIPROCESSOR
    328 			kcpuset_merge(onproc, pv->pv_pmap->pm_onproc);
    329 			if (kcpuset_match(onproc, kcpuset_running)) {
    330 				break;
    331 			}
    332 #else
    333 			if (pv->pv_pmap == curpmap) {
    334 				onproc = curcpu()->ci_data.cpu_kcpuset;
    335 				break;
    336 			}
    337 #endif
    338 		}
    339 	}
    340 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
    341 	kpreempt_disable();
    342 	pmap_md_page_syncicache(pg, onproc);
    343 #ifdef MULTIPROCESSOR
    344 	kcpuset_destroy(onproc);
    345 #endif
    346 	kpreempt_enable();
    347 }
    348 
    349 /*
    350  * Define the initial bounds of the kernel virtual address space.
    351  */
    352 void
    353 pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
    354 {
    355 
    356 	*vstartp = pmap_limits.virtual_start;
    357 	*vendp = pmap_limits.virtual_end;
    358 }
    359 
    360 vaddr_t
    361 pmap_growkernel(vaddr_t maxkvaddr)
    362 {
    363 	vaddr_t virtual_end = pmap_limits.virtual_end;
    364 	maxkvaddr = pmap_round_seg(maxkvaddr) - 1;
    365 
    366 	/*
    367 	 * Reserve PTEs for the new KVA space.
    368 	 */
    369 	for (; virtual_end < maxkvaddr; virtual_end += NBSEG) {
    370 		pmap_pte_reserve(pmap_kernel(), virtual_end, 0);
    371 	}
    372 
    373 	/*
    374 	 * Don't exceed VM_MAX_KERNEL_ADDRESS!
    375 	 */
    376 	if (virtual_end == 0 || virtual_end > VM_MAX_KERNEL_ADDRESS)
    377 		virtual_end = VM_MAX_KERNEL_ADDRESS;
    378 
    379 	/*
    380 	 * Update new end.
    381 	 */
    382 	pmap_limits.virtual_end = virtual_end;
    383 	return virtual_end;
    384 }
    385 
    386 /*
    387  * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
    388  * This function allows for early dynamic memory allocation until the virtual
    389  * memory system has been bootstrapped.  After that point, either kmem_alloc
    390  * or malloc should be used.  This function works by stealing pages from the
    391  * (to be) managed page pool, then implicitly mapping the pages (by using
    392  * their k0seg addresses) and zeroing them.
    393  *
    394  * It may be used once the physical memory segments have been pre-loaded
    395  * into the vm_physmem[] array.  Early memory allocation MUST use this
    396  * interface!  This cannot be used after vm_page_startup(), and will
    397  * generate a panic if tried.
    398  *
    399  * Note that this memory will never be freed, and in essence it is wired
    400  * down.
    401  *
    402  * We must adjust *vstartp and/or *vendp iff we use address space
    403  * from the kernel virtual address range defined by pmap_virtual_space().
    404  */
    405 vaddr_t
    406 pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
    407 {
    408 	u_int npgs;
    409 	paddr_t pa;
    410 	vaddr_t va;
    411 
    412 	size = round_page(size);
    413 	npgs = atop(size);
    414 
    415 	for (u_int bank = 0; bank < vm_nphysseg; bank++) {
    416 		struct vm_physseg * const seg = VM_PHYSMEM_PTR(bank);
    417 		if (uvm.page_init_done == true)
    418 			panic("pmap_steal_memory: called _after_ bootstrap");
    419 
    420 		if (seg->avail_start != seg->start ||
    421 		    seg->avail_start >= seg->avail_end)
    422 			continue;
    423 
    424 		if ((seg->avail_end - seg->avail_start) < npgs)
    425 			continue;
    426 
    427 		/*
    428 		 * There are enough pages here; steal them!
    429 		 */
    430 		pa = ptoa(seg->avail_start);
    431 		seg->avail_start += npgs;
    432 		seg->start += npgs;
    433 
    434 		/*
    435 		 * Have we used up this segment?
    436 		 */
    437 		if (seg->avail_start == seg->end) {
    438 			if (vm_nphysseg == 1)
    439 				panic("pmap_steal_memory: out of memory!");
    440 
    441 			/* Remove this segment from the list. */
    442 			vm_nphysseg--;
    443 			if (bank < vm_nphysseg)
    444 				memmove(seg, seg+1,
    445 				    sizeof(*seg) * (vm_nphysseg - bank));
    446 		}
    447 
    448 		va = pmap_md_map_poolpage(pa, size);
    449 		memset((void *)va, 0, size);
    450 		return va;
    451 	}
    452 
    453 	/*
    454 	 * If we got here, there was no memory left.
    455 	 */
    456 	panic("pmap_steal_memory: no memory to steal");
    457 }
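/*
 * Usage sketch: the normal caller is uvm_pageboot_alloc(), which (when
 * PMAP_STEAL_MEMORY is defined) does roughly the following before the page
 * system is up:
 *
 *	vaddr_t va = pmap_steal_memory(round_page(size), &vstart, &vend);
 *	// va is already zeroed and usable immediately (direct-mapped)
 */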
    458 
    459 /*
    460  *	Initialize the pmap module.
    461  *	Called by vm_init, to initialize any structures that the pmap
    462  *	system needs to map virtual memory.
    463  */
    464 void
    465 pmap_init(void)
    466 {
    467 	UVMHIST_INIT_STATIC(pmapexechist, pmapexechistbuf);
    468 	UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);
    469 
    470 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    471 
    472 	/*
    473 	 * Initialize the segtab lock.
    474 	 */
    475 	mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH);
    476 
    477 	/*
    478 	 * Set a low water mark on the pv_entry pool, so that we are
    479 	 * more likely to have these around even in extreme memory
    480 	 * starvation.
    481 	 */
    482 	pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);
    483 
    484 	pmap_md_init();
    485 
    486 	/*
    487 	 * Now it is safe to enable pv entry recording.
    488 	 */
    489 	pmap_initialized = true;
    490 }
    491 
     492 /*
     493  *	Create and return a physical map.
     494  *
     495  *	The returned map is a real physical map that may be
     496  *	referenced by the hardware.  It covers the user address
     497  *	range (VM_MIN_ADDRESS .. VM_MAXUSER_ADDRESS), starts with
     498  *	a single reference, and initially contains no valid
     499  *	mappings.
     500  */
    504 pmap_t
    505 pmap_create(void)
    506 {
    507 	pmap_t pmap;
    508 
    509 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    510 	PMAP_COUNT(create);
    511 
    512 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
    513 	memset(pmap, 0, PMAP_SIZE);
    514 
    515 	KASSERT(pmap->pm_pai[0].pai_link.le_prev == NULL);
    516 
    517 	pmap->pm_count = 1;
    518 	pmap->pm_minaddr = VM_MIN_ADDRESS;
    519 	pmap->pm_maxaddr = VM_MAXUSER_ADDRESS;
    520 
    521 	pmap_segtab_init(pmap);
    522 
    523 #ifdef MULTIPROCESSOR
    524 	kcpuset_create(&pmap->pm_active, true);
    525 	kcpuset_create(&pmap->pm_onproc, true);
    526 #endif
    527 
    528 	UVMHIST_LOG(pmaphist, "<- pmap %p", pmap,0,0,0);
    529 	return pmap;
    530 }
    531 
    532 /*
    533  *	Retire the given physical map from service.
    534  *	Should only be called if the map contains
    535  *	no valid mappings.
    536  */
    537 void
    538 pmap_destroy(pmap_t pmap)
    539 {
    540 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    541 	UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0,0,0);
    542 
    543 	if (atomic_dec_uint_nv(&pmap->pm_count) > 0) {
    544 		PMAP_COUNT(dereference);
    545 		return;
    546 	}
    547 
    548 	KASSERT(pmap->pm_count == 0);
    549 	PMAP_COUNT(destroy);
    550 	kpreempt_disable();
    551 	pmap_md_tlb_miss_lock_enter();
    552 	pmap_tlb_asid_release_all(pmap);
    553 	pmap_segtab_destroy(pmap, NULL, 0);
    554 	pmap_md_tlb_miss_lock_exit();
    555 
    556 #ifdef MULTIPROCESSOR
    557 	kcpuset_destroy(pmap->pm_active);
    558 	kcpuset_destroy(pmap->pm_onproc);
    559 #endif
    560 
    561 	pool_put(&pmap_pmap_pool, pmap);
    562 	kpreempt_enable();
    563 
    564 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
    565 }
    566 
    567 /*
    568  *	Add a reference to the specified pmap.
    569  */
    570 void
    571 pmap_reference(pmap_t pmap)
    572 {
    573 
    574 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    575 	UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0,0,0);
    576 	PMAP_COUNT(reference);
    577 
    578 	if (pmap != NULL) {
    579 		atomic_inc_uint(&pmap->pm_count);
    580 	}
    581 
    582 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
    583 }
    584 
    585 /*
    586  *	Make a new pmap (vmspace) active for the given process.
    587  */
    588 void
    589 pmap_activate(struct lwp *l)
    590 {
    591 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
    592 
    593 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    594 	UVMHIST_LOG(pmaphist, "(l=%p (pmap=%p))", l, pmap, 0,0);
    595 	PMAP_COUNT(activate);
    596 
    597 	kpreempt_disable();
    598 	pmap_md_tlb_miss_lock_enter();
    599 	pmap_tlb_asid_acquire(pmap, l);
    600 	if (l == curlwp) {
    601 		pmap_segtab_activate(pmap, l);
    602 	}
    603 	pmap_md_tlb_miss_lock_exit();
    604 	kpreempt_enable();
    605 
    606 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
    607 }
    608 
    609 /*
    610  *	Make a previously active pmap (vmspace) inactive.
    611  */
    612 void
    613 pmap_deactivate(struct lwp *l)
    614 {
    615 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
    616 
    617 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    618 	UVMHIST_LOG(pmaphist, "(l=%p (pmap=%p))", l, pmap, 0,0);
    619 	PMAP_COUNT(deactivate);
    620 
    621 	kpreempt_disable();
    622 	pmap_md_tlb_miss_lock_enter();
    623 	curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
    624 	pmap_tlb_asid_deactivate(pmap);
    625 	pmap_md_tlb_miss_lock_exit();
    626 	kpreempt_enable();
    627 
    628 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
    629 }
    630 
    631 void
    632 pmap_update(struct pmap *pmap)
    633 {
    634 
    635 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    636 	UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0,0,0);
    637 	PMAP_COUNT(update);
    638 
    639 	kpreempt_disable();
    640 #if defined(MULTIPROCESSOR) && defined(PMAP_NEED_TLB_SHOOTDOWN)
    641 	u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0);
    642 	if (pending && pmap_tlb_shootdown_bystanders(pmap))
    643 		PMAP_COUNT(shootdown_ipis);
    644 #endif
    645 	pmap_md_tlb_miss_lock_enter();
    646 #if defined(DEBUG) && !defined(MULTIPROCESSOR)
    647 	pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
    648 #endif /* DEBUG */
    649 
    650 	/*
    651 	 * If pmap_remove_all was called, we deactivated ourselves and nuked
    652 	 * our ASID.  Now we have to reactivate ourselves.
    653 	 */
    654 	if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) {
    655 		pmap->pm_flags ^= PMAP_DEFERRED_ACTIVATE;
    656 		pmap_tlb_asid_acquire(pmap, curlwp);
    657 		pmap_segtab_activate(pmap, curlwp);
    658 	}
    659 	pmap_md_tlb_miss_lock_exit();
    660 	kpreempt_enable();
    661 
    662 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
    663 }
    664 
    665 /*
    666  *	Remove the given range of addresses from the specified map.
    667  *
    668  *	It is assumed that the start and end are properly
    669  *	rounded to the page size.
    670  */
    671 
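/*
 * Per-segment callback invoked via pmap_pte_process(): it is called once for
 * each segment-sized chunk of the range, with "ptep" pointing at the first
 * PTE covering sva and "flags" carrying the invalid PTE value to store.
 */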
    672 static bool
    673 pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
    674 	uintptr_t flags)
    675 {
    676 	const pt_entry_t npte = flags;
    677 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
    678 
    679 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    680 	UVMHIST_LOG(pmaphist, "(pmap=%p %sva=%"PRIxVADDR"..%"PRIxVADDR,
    681 	    pmap, (is_kernel_pmap_p ? "(kernel) " : ""), sva, eva);
    682 	UVMHIST_LOG(pmaphist, "ptep=%p, flags(npte)=%#"PRIxPTR")",
    683 	    ptep, flags, 0, 0);
    684 
    685 	KASSERT(kpreempt_disabled());
    686 
    687 	for (; sva < eva; sva += NBPG, ptep++) {
    688 		pt_entry_t pt_entry = *ptep;
    689 		if (!pte_valid_p(pt_entry))
    690 			continue;
    691 		if (is_kernel_pmap_p)
     692 			PMAP_COUNT(remove_kernel_pages);
    693 		else
    694 			PMAP_COUNT(remove_user_pages);
    695 		if (pte_wired_p(pt_entry))
    696 			pmap->pm_stats.wired_count--;
    697 		pmap->pm_stats.resident_count--;
    698 		struct vm_page *pg = PHYS_TO_VM_PAGE(pte_to_paddr(pt_entry));
    699 		if (__predict_true(pg != NULL)) {
    700 			pmap_remove_pv(pmap, sva, pg,
    701 			   pte_modified_p(pt_entry));
    702 		}
    703 		pmap_md_tlb_miss_lock_enter();
    704 		*ptep = npte;
    705 		/*
    706 		 * Flush the TLB for the given address.
    707 		 */
    708 		pmap_tlb_invalidate_addr(pmap, sva);
    709 		pmap_md_tlb_miss_lock_exit();
    710 	}
    711 	return false;
    712 }
    713 
    714 void
    715 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
    716 {
    717 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
    718 	const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
    719 
    720 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    721 	UVMHIST_LOG(pmaphist, "(pmap=%p, va=%#"PRIxVADDR"..%#"PRIxVADDR")",
    722 	    pmap, sva, eva, 0);
    723 
    724 	if (is_kernel_pmap_p)
    725 		PMAP_COUNT(remove_kernel_calls);
    726 	else
    727 		PMAP_COUNT(remove_user_calls);
    728 #ifdef PARANOIADIAG
     729 	if (sva < pmap->pm_minaddr || eva > pmap->pm_maxaddr)
    730 		panic("%s: va range %#"PRIxVADDR"-%#"PRIxVADDR" not in range",
    731 		    __func__, sva, eva - 1);
    732 	if (PMAP_IS_ACTIVE(pmap)) {
    733 		struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu());
    734 		uint32_t asid = tlb_get_asid();
    735 		if (asid != pai->pai_asid) {
    736 			panic("%s: inconsistency for active TLB flush"
    737 			    ": %d <-> %d", __func__, asid, pai->pai_asid);
    738 		}
    739 	}
    740 #endif
    741 	kpreempt_disable();
    742 	pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte);
    743 	kpreempt_enable();
    744 
    745 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
    746 }
    747 
    748 /*
    749  *	pmap_page_protect:
    750  *
    751  *	Lower the permission for all mappings to a given page.
    752  */
    753 void
    754 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
    755 {
    756 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
    757 	pv_entry_t pv;
    758 	vaddr_t va;
    759 
    760 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    761 	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR") prot=%#x)",
    762 	    pg, VM_PAGE_TO_PHYS(pg), prot, 0);
    763 	PMAP_COUNT(page_protect);
    764 
    765 	switch (prot) {
    766 	case VM_PROT_READ|VM_PROT_WRITE:
    767 	case VM_PROT_ALL:
    768 		break;
    769 
    770 	/* copy_on_write */
    771 	case VM_PROT_READ:
    772 	case VM_PROT_READ|VM_PROT_EXECUTE:
    773 		(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
    774 		pv = &mdpg->mdpg_first;
    775 		/*
    776 		 * Loop over all current mappings setting/clearing as appropriate.
    777 		 */
    778 		if (pv->pv_pmap != NULL) {
    779 			while (pv != NULL) {
    780 				const pmap_t pmap = pv->pv_pmap;
    781 				const uint16_t gen = VM_PAGEMD_PVLIST_GEN(mdpg);
    782 				va = pv->pv_va;
    783 				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
    784 				pmap_protect(pmap, va, va + PAGE_SIZE, prot);
    785 				KASSERT(pv->pv_pmap == pmap);
    786 				pmap_update(pmap);
    787 				if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg, false)) {
    788 					pv = &mdpg->mdpg_first;
    789 				} else {
    790 					pv = pv->pv_next;
    791 				}
    792 			}
    793 		}
    794 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
    795 		break;
    796 
    797 	/* remove_all */
    798 	default:
    799 		/*
    800 		 * Do this first so that for each unmapping, pmap_remove_pv
    801 		 * won't try to sync the icache.
    802 		 */
    803 		if (pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE)) {
    804 			UVMHIST_LOG(pmapexechist, "pg %p (pa %#"PRIxPADDR
    805 			    "): execpage cleared", pg, VM_PAGE_TO_PHYS(pg),0,0);
    806 			PMAP_COUNT(exec_uncached_page_protect);
    807 		}
    808 		(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
    809 		pv = &mdpg->mdpg_first;
    810 		while (pv->pv_pmap != NULL) {
    811 			const pmap_t pmap = pv->pv_pmap;
    812 			va = pv->pv_va;
    813 			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
    814 			pmap_remove(pmap, va, va + PAGE_SIZE);
    815 			pmap_update(pmap);
    816 			(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
    817 		}
    818 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
    819 	}
    820 
    821 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
    822 }
    823 
    824 static bool
    825 pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
    826 	uintptr_t flags)
    827 {
    828 	const vm_prot_t prot = (flags & VM_PROT_ALL);
    829 
    830 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    831 	UVMHIST_LOG(pmaphist, "(pmap=%p %sva=%"PRIxVADDR"..%"PRIxVADDR,
    832 	    pmap, (pmap == pmap_kernel() ? "(kernel) " : ""), sva, eva);
    833 	UVMHIST_LOG(pmaphist, "ptep=%p, flags(npte)=%#"PRIxPTR")",
    834 	    ptep, flags, 0, 0);
    835 
    836 	KASSERT(kpreempt_disabled());
    837 	/*
    838 	 * Change protection on every valid mapping within this segment.
    839 	 */
    840 	for (; sva < eva; sva += NBPG, ptep++) {
    841 		pt_entry_t pt_entry = *ptep;
    842 		if (!pte_valid_p(pt_entry))
    843 			continue;
    844 		struct vm_page * const pg =
    845 		    PHYS_TO_VM_PAGE(pte_to_paddr(pt_entry));
    846 		if (pg != NULL && pte_modified_p(pt_entry)) {
    847 			struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
    848 			pmap_md_vca_clean(pg, sva, PMAP_WBINV);
    849 			if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
    850 				KASSERT(mdpg->mdpg_first.pv_pmap != NULL);
    851 				if (pte_cached_p(pt_entry)) {
    852 					UVMHIST_LOG(pmapexechist,
    853 					    "pg %p (pa %#"PRIxPADDR"): %s",
    854 					    pg, VM_PAGE_TO_PHYS(pg),
    855 					    "syncicached performed", 0);
    856 					pmap_page_syncicache(pg);
    857 					PMAP_COUNT(exec_synced_protect);
    858 				}
    859 			}
    860 		}
    861 		pt_entry = pte_prot_downgrade(pt_entry, prot);
    862 		if (*ptep != pt_entry) {
    863 			pmap_md_tlb_miss_lock_enter();
    864 			*ptep = pt_entry;
    865 			/*
    866 			 * Update the TLB if needed.
    867 			 */
    868 			pmap_tlb_update_addr(pmap, sva, pt_entry,
    869 			    PMAP_TLB_NEED_IPI);
    870 			pmap_md_tlb_miss_lock_exit();
    871 		}
    872 	}
    873 	return false;
    874 }
    875 
    876 /*
    877  *	Set the physical protection on the
    878  *	specified range of this map as requested.
    879  */
    880 void
    881 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
    882 {
    883 
    884 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    885 	UVMHIST_LOG(pmaphist,
     886 	    "  pmap=%p, va=%#"PRIxVADDR"..%#"PRIxVADDR" prot=%#x)",
    887 	    pmap, sva, eva, prot);
    888 	PMAP_COUNT(protect);
    889 
    890 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
    891 		pmap_remove(pmap, sva, eva);
    892 		UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
    893 		return;
    894 	}
    895 
    896 #ifdef PARANOIADIAG
     897 	if (sva < pmap->pm_minaddr || eva > pmap->pm_maxaddr)
    898 		panic("%s: va range %#"PRIxVADDR"-%#"PRIxVADDR" not in range",
    899 		    __func__, sva, eva - 1);
    900 	if (PMAP_IS_ACTIVE(pmap)) {
    901 		struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu());
    902 		uint32_t asid = tlb_get_asid();
    903 		if (asid != pai->pai_asid) {
    904 			panic("%s: inconsistency for active TLB update"
    905 			    ": %d <-> %d", __func__, asid, pai->pai_asid);
    906 		}
    907 	}
    908 #endif
    909 
    910 	/*
    911 	 * Change protection on every valid mapping within this segment.
    912 	 */
    913 	kpreempt_disable();
    914 	pmap_pte_process(pmap, sva, eva, pmap_pte_protect, prot);
    915 	kpreempt_enable();
    916 
    917 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
    918 }
    919 
    920 #if defined(__PMAP_VIRTUAL_CACHE_ALIASES)
    921 /*
    922  *	pmap_page_cache:
    923  *
    924  *	Change all mappings of a managed page to cached/uncached.
    925  */
    926 static void
    927 pmap_page_cache(struct vm_page *pg, bool cached)
    928 {
    929 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
    930 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
    931 	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR") cached=%s)",
    932 	    pg, VM_PAGE_TO_PHYS(pg), cached ? "true" : "false", 0);
    933 	KASSERT(kpreempt_disabled());
    934 
    935 	if (cached) {
    936 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
    937 		PMAP_COUNT(page_cache_restorations);
    938 	} else {
    939 		pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED);
    940 		PMAP_COUNT(page_cache_evictions);
    941 	}
    942 
    943 	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
    944 	KASSERT(kpreempt_disabled());
    945 	for (pv_entry_t pv = &mdpg->mdpg_first;
    946 	     pv != NULL;
    947 	     pv = pv->pv_next) {
    948 		pmap_t pmap = pv->pv_pmap;
    949 		vaddr_t va = pv->pv_va;
    950 
    951 		KASSERT(pmap != NULL);
    952 		KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
    953 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
    954 		if (ptep == NULL)
    955 			continue;
    956 		pt_entry_t pt_entry = *ptep;
    957 		if (pte_valid_p(pt_entry)) {
    958 			pt_entry = pte_cached_change(pt_entry, cached);
    959 			pmap_md_tlb_miss_lock_enter();
    960 			*ptep = pt_entry;
    961 			pmap_tlb_update_addr(pmap, va, pt_entry,
    962 			    PMAP_TLB_NEED_IPI);
    963 			pmap_md_tlb_miss_lock_exit();
    964 		}
    965 	}
    966 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
    967 }
    968 #endif	/* __PMAP_VIRTUAL_CACHE_ALIASES */
    969 
    970 /*
    971  *	Insert the given physical page (p) at
    972  *	the specified virtual address (v) in the
    973  *	target physical map with the protection requested.
    974  *
    975  *	If specified, the page will be wired down, meaning
    976  *	that the related pte can not be reclaimed.
    977  *
    978  *	NB:  This is the only routine which MAY NOT lazy-evaluate
    979  *	or lose information.  That is, this routine must actually
    980  *	insert this page into the given map NOW.
    981  */
    982 int
    983 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
    984 {
    985 	pt_entry_t npte;
    986 	const bool wired = (flags & PMAP_WIRED) != 0;
    987 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
    988 #ifdef UVMHIST
    989 	struct kern_history * const histp =
    990 	    ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist);
    991 #endif
    992 
    993 	UVMHIST_FUNC(__func__);
    994 #define VM_PROT_STRING(prot) \
    995 	&"\0    (R)\0  (W)\0  (RW)\0 (X)\0  (RX)\0 (WX)\0 (RWX)\0"[UVM_PROTECTION(prot)*6]
    996 	UVMHIST_CALLED(*histp);
    997 	UVMHIST_LOG(*histp, "(pmap=%p, va=%#"PRIxVADDR", pa=%#"PRIxPADDR,
    998 	    pmap, va, pa, 0);
    999 	UVMHIST_LOG(*histp, "prot=%#x%s flags=%#x%s)",
   1000 	    prot, VM_PROT_STRING(prot), flags, VM_PROT_STRING(flags));
   1001 
   1002 	const bool good_color = PMAP_PAGE_COLOROK_P(pa, va);
   1003 	if (is_kernel_pmap_p) {
   1004 		PMAP_COUNT(kernel_mappings);
   1005 		if (!good_color)
   1006 			PMAP_COUNT(kernel_mappings_bad);
   1007 	} else {
   1008 		PMAP_COUNT(user_mappings);
   1009 		if (!good_color)
   1010 			PMAP_COUNT(user_mappings_bad);
   1011 	}
   1012 #if defined(DEBUG) || defined(DIAGNOSTIC) || defined(PARANOIADIAG)
   1013 	if (va < pmap->pm_minaddr || va >= pmap->pm_maxaddr)
   1014 		panic("%s: %s %#"PRIxVADDR" too big",
   1015 		    __func__, is_kernel_pmap_p ? "kva" : "uva", va);
   1016 #endif
   1017 
   1018 	KASSERTMSG(prot & VM_PROT_READ,
   1019 	    "%s: no READ (%#x) in prot %#x", __func__, VM_PROT_READ, prot);
   1020 
   1021 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1022 	struct vm_page_md *mdpg;
   1023 
   1024 	if (pg) {
   1025 		mdpg = VM_PAGE_TO_MD(pg);
   1026 		/* Set page referenced/modified status based on flags */
   1027 		if (flags & VM_PROT_WRITE)
   1028 			pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
   1029 		else if (flags & VM_PROT_ALL)
   1030 			pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
   1031 
   1032 #ifdef __PMAP_VIRTUAL_CACHE_ALIASES
   1033 		if (!VM_PAGEMD_CACHED(pg))
   1034 			flags |= PMAP_NOCACHE;
   1035 #endif
   1036 
   1037 		PMAP_COUNT(managed_mappings);
   1038 	} else {
   1039 		/*
   1040 		 * Assumption: if it is not part of our managed memory
   1041 		 * then it must be device memory which may be volatile.
   1042 		 */
   1043 		mdpg = NULL;
   1044 		flags |= PMAP_NOCACHE;
   1045 		PMAP_COUNT(unmanaged_mappings);
   1046 	}
   1047 
   1048 	npte = pte_make_enter(pa, mdpg, prot, flags, is_kernel_pmap_p);
   1049 
   1050 	kpreempt_disable();
   1051 	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags);
   1052 	if (__predict_false(ptep == NULL)) {
   1053 		kpreempt_enable();
   1054 		UVMHIST_LOG(*histp, "<- ENOMEM", 0,0,0,0);
   1055 		return ENOMEM;
   1056 	}
   1057 	pt_entry_t opte = *ptep;
   1058 
   1059 	/* Done after case that may sleep/return. */
   1060 	if (pg)
   1061 		pmap_enter_pv(pmap, va, pg, &npte);
   1062 
   1063 	/*
   1064 	 * Now validate mapping with desired protection/wiring.
   1065 	 * Assume uniform modified and referenced status for all
   1066 	 * MIPS pages in a MACH page.
   1067 	 */
   1068 	if (wired) {
   1069 		pmap->pm_stats.wired_count++;
   1070 		npte = pte_wire_entry(npte);
   1071 	}
   1072 
   1073 	UVMHIST_LOG(*histp, "new pte %#x (pa %#"PRIxPADDR")", npte, pa, 0,0);
   1074 
   1075 	if (pte_valid_p(opte) && pte_to_paddr(opte) != pa) {
   1076 		pmap_remove(pmap, va, va + NBPG);
   1077 		PMAP_COUNT(user_mappings_changed);
   1078 	}
   1079 
   1080 	KASSERT(pte_valid_p(npte));
   1081 	bool resident = pte_valid_p(opte);
   1082 	if (!resident)
   1083 		pmap->pm_stats.resident_count++;
   1084 	pmap_md_tlb_miss_lock_enter();
   1085 	*ptep = npte;
   1086 
   1087 	pmap_tlb_update_addr(pmap, va, npte,
   1088 	    ((flags & VM_PROT_ALL) ? PMAP_TLB_INSERT : 0)
   1089 	    | (resident ? PMAP_TLB_NEED_IPI : 0));
   1090 	pmap_md_tlb_miss_lock_exit();
   1091 	kpreempt_enable();
   1092 
   1093 	if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) {
   1094 		KASSERT(mdpg != NULL);
   1095 		PMAP_COUNT(exec_mappings);
   1096 		if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) {
   1097 			if (!pte_deferred_exec_p(npte)) {
   1098 				UVMHIST_LOG(*histp,
   1099 				    "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
   1100 				    va, pg, "immediate", "");
   1101 				pmap_page_syncicache(pg);
   1102 				pmap_page_set_attributes(mdpg,
   1103 				    VM_PAGEMD_EXECPAGE);
   1104 				PMAP_COUNT(exec_synced_mappings);
   1105 			} else {
   1106 				UVMHIST_LOG(*histp, "va=%#"PRIxVADDR
   1107 				    " pg %p: %s syncicache: pte %#x",
   1108 				    va, pg, "defer", npte);
   1109 			}
   1110 		} else {
   1111 			UVMHIST_LOG(*histp,
   1112 			    "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
   1113 			    va, pg, "no",
   1114 			    (pte_cached_p(npte)
   1115 				? " (already exec)"
   1116 				: " (uncached)"));
   1117 		}
   1118 	} else if (pg != NULL && (prot & VM_PROT_EXECUTE)) {
   1119 		KASSERT(mdpg != NULL);
   1120 		KASSERT(prot & VM_PROT_WRITE);
   1121 		PMAP_COUNT(exec_mappings);
   1122 		pmap_page_syncicache(pg);
   1123 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1124 		UVMHIST_LOG(pmapexechist,
   1125 		    "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
   1126 		    va, pg, "immediate", " (writeable)");
   1127 	}
   1128 
   1129 	if (prot & VM_PROT_EXECUTE) {
   1130 		UVMHIST_LOG(pmapexechist, "<- 0 (OK)", 0,0,0,0);
   1131 	} else {
   1132 		UVMHIST_LOG(pmaphist, "<- 0 (OK)", 0,0,0,0);
   1133 	}
   1134 	return 0;
   1135 }
   1136 
   1137 void
   1138 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1139 {
   1140 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1141 	struct vm_page_md *mdpg;
   1142 
   1143 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1144 	UVMHIST_LOG(pmaphist, "(va=%#"PRIxVADDR" pa=%#"PRIxPADDR
   1145 	    ", prot=%#x, flags=%#x)", va, pa, prot, flags);
   1146 	PMAP_COUNT(kenter_pa);
   1147 
   1148 	if (pg == NULL) {
   1149 		mdpg = NULL;
   1150 		PMAP_COUNT(kenter_pa_unmanaged);
   1151 		flags |= PMAP_NOCACHE;
   1152 	} else {
   1153 		mdpg = VM_PAGE_TO_MD(pg);
   1154 	}
   1155 
   1156 	if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va))
   1157 		PMAP_COUNT(kenter_pa_bad);
   1158 
   1159 	const pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags);
   1160 	kpreempt_disable();
   1161 	pt_entry_t * const ptep = pmap_pte_reserve(pmap_kernel(), va, 0);
   1162 	KASSERT(ptep != NULL);
   1163 	KASSERT(!pte_valid_p(*ptep));
   1164 	pmap_md_tlb_miss_lock_enter();
   1165 	*ptep = npte;
   1166 	/*
   1167 	 * We have the option to force this mapping into the TLB but we
   1168 	 * don't.  Instead let the next reference to the page do it.
   1169 	 */
   1170 	pmap_tlb_update_addr(pmap_kernel(), va, npte, 0);
   1171 	pmap_md_tlb_miss_lock_exit();
   1172 	kpreempt_enable();
   1173 #if DEBUG > 1
   1174 	for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) {
   1175 		if (((long *)va)[i] != ((long *)pa)[i])
   1176 			panic("%s: contents (%lx) of va %#"PRIxVADDR
   1177 			    " != contents (%lx) of pa %#"PRIxPADDR, __func__,
   1178 			    ((long *)va)[i], va, ((long *)pa)[i], pa);
   1179 	}
   1180 #endif
   1181 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
   1182 }
   1183 
   1184 static bool
   1185 pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
   1186 	uintptr_t flags)
   1187 {
   1188 	const pt_entry_t new_pt_entry = pte_nv_entry(true);
   1189 
   1190 	KASSERT(kpreempt_disabled());
   1191 
   1192 	/*
    1193 	 * Invalidate every valid mapping within this segment.
   1194 	 */
   1195 	for (; sva < eva; sva += NBPG, ptep++) {
   1196 		pt_entry_t pt_entry = *ptep;
   1197 		if (!pte_valid_p(pt_entry)) {
   1198 			continue;
   1199 		}
   1200 
   1201 		PMAP_COUNT(kremove_pages);
   1202 		struct vm_page * const pg =
   1203 		    PHYS_TO_VM_PAGE(pte_to_paddr(pt_entry));
   1204 		if (pg != NULL)
   1205 			pmap_md_vca_clean(pg, sva, PMAP_WBINV);
   1206 
   1207 		pmap_md_tlb_miss_lock_enter();
   1208 		*ptep = new_pt_entry;
   1209 		pmap_tlb_invalidate_addr(pmap_kernel(), sva);
   1210 		pmap_md_tlb_miss_lock_exit();
   1211 	}
   1212 
   1213 	return false;
   1214 }
   1215 
   1216 void
   1217 pmap_kremove(vaddr_t va, vsize_t len)
   1218 {
   1219 	const vaddr_t sva = trunc_page(va);
   1220 	const vaddr_t eva = round_page(va + len);
   1221 
   1222 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1223 	UVMHIST_LOG(pmaphist, "(va=%#"PRIxVADDR" len=%#"PRIxVSIZE")",
   1224 	    va, len, 0,0);
   1225 
   1226 	kpreempt_disable();
   1227 	pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0);
   1228 	kpreempt_enable();
   1229 
   1230 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
   1231 }
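/*
 * Sketch of the usual pmap_kenter_pa()/pmap_kremove() pairing (the caller
 * supplies its own KVA; names here are illustrative):
 *
 *	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
 *	... access the page through va ...
 *	pmap_kremove(va, PAGE_SIZE);
 *	pmap_update(pmap_kernel());	// flush any deferred pmap work
 */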
   1232 
   1233 void
   1234 pmap_remove_all(struct pmap *pmap)
   1235 {
   1236 	KASSERT(pmap != pmap_kernel());
   1237 
   1238 	kpreempt_disable();
   1239 	/*
   1240 	 * Free all of our ASIDs which means we can skip doing all the
   1241 	 * tlb_invalidate_addrs().
   1242 	 */
   1243 	pmap_md_tlb_miss_lock_enter();
   1244 	pmap_tlb_asid_deactivate(pmap);
   1245 	pmap_tlb_asid_release_all(pmap);
   1246 	pmap_md_tlb_miss_lock_exit();
   1247 	pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE;
   1248 
   1249 	kpreempt_enable();
   1250 }
   1251 
   1252 /*
   1253  *	Routine:	pmap_unwire
   1254  *	Function:	Clear the wired attribute for a map/virtual-address
   1255  *			pair.
   1256  *	In/out conditions:
   1257  *			The mapping must already exist in the pmap.
   1258  */
   1259 void
   1260 pmap_unwire(pmap_t pmap, vaddr_t va)
   1261 {
   1262 
   1263 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1264 	UVMHIST_LOG(pmaphist, "(pmap=%p va=%#"PRIxVADDR")", pmap, va, 0,0);
   1265 	PMAP_COUNT(unwire);
   1266 
   1267 	/*
   1268 	 * Don't need to flush the TLB since PG_WIRED is only in software.
   1269 	 */
   1270 #ifdef PARANOIADIAG
   1271 	if (va < pmap->pm_minaddr || pmap->pm_maxaddr <= va)
   1272 		panic("pmap_unwire");
   1273 #endif
   1274 	kpreempt_disable();
    1275 	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
    1276 #ifdef DIAGNOSTIC
    1277 	if (ptep == NULL)
    1278 		panic("%s: pmap %p va %#"PRIxVADDR" invalid STE",
    1279 		    __func__, pmap, va);
    1280 #endif
    1281 	pt_entry_t pt_entry = *ptep;
   1282 
   1283 #ifdef DIAGNOSTIC
   1284 	if (!pte_valid_p(pt_entry))
   1285 		panic("pmap_unwire: pmap %p va %#"PRIxVADDR" invalid PTE",
   1286 		    pmap, va);
   1287 #endif
   1288 
   1289 	if (pte_wired_p(pt_entry)) {
   1290 		pmap_md_tlb_miss_lock_enter();
   1291 		*ptep = pte_unwire_entry(*ptep);
   1292 		pmap_md_tlb_miss_lock_exit();
   1293 		pmap->pm_stats.wired_count--;
   1294 	}
   1295 #ifdef DIAGNOSTIC
   1296 	else {
   1297 		printf("%s: wiring for pmap %p va %#"PRIxVADDR" unchanged!\n",
   1298 		    __func__, pmap, va);
   1299 	}
   1300 #endif
   1301 	kpreempt_enable();
   1302 }
   1303 
   1304 /*
   1305  *	Routine:	pmap_extract
   1306  *	Function:
   1307  *		Extract the physical page address associated
   1308  *		with the given map/virtual_address pair.
   1309  */
   1310 bool
   1311 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
   1312 {
   1313 	paddr_t pa;
   1314 
   1315 	//UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1316 	//UVMHIST_LOG(pmaphist, "(pmap=%p va=%#"PRIxVADDR")", pmap, va, 0,0);
   1317 	if (pmap == pmap_kernel()) {
   1318 		if (pmap_md_direct_mapped_vaddr_p(va)) {
   1319 			pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   1320 			goto done;
   1321 		}
   1322 		if (pmap_md_io_vaddr_p(va))
   1323 			panic("pmap_extract: io address %#"PRIxVADDR"", va);
   1324 	}
   1325 	kpreempt_disable();
   1326 	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1327 	if (ptep == NULL) {
   1328 		//UVMHIST_LOG(pmaphist, "<- false (not in segmap)", 0,0,0,0);
   1329 		kpreempt_enable();
   1330 		return false;
   1331 	}
   1332 	if (!pte_valid_p(*ptep)) {
   1333 		//UVMHIST_LOG(pmaphist, "<- false (PTE not valid)", 0,0,0,0);
   1334 		kpreempt_enable();
   1335 		return false;
   1336 	}
   1337 	pa = pte_to_paddr(*ptep) | (va & PGOFSET);
   1338 	kpreempt_enable();
   1339 done:
   1340 	if (pap != NULL) {
   1341 		*pap = pa;
   1342 	}
   1343 	//UVMHIST_LOG(pmaphist, "<- true (pa %#"PRIxPADDR")", pa, 0,0,0);
   1344 	return true;
   1345 }
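/*
 * Typical use (sketch):
 *
 *	paddr_t pa;
 *	if (pmap_extract(pmap_kernel(), va, &pa))
 *		... pa now holds the physical address backing va ...
 */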
   1346 
   1347 /*
   1348  *	Copy the range specified by src_addr/len
   1349  *	from the source map to the range dst_addr/len
   1350  *	in the destination map.
   1351  *
   1352  *	This routine is only advisory and need not do anything.
   1353  */
   1354 void
   1355 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
   1356     vaddr_t src_addr)
   1357 {
   1358 
   1359 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1360 	PMAP_COUNT(copy);
   1361 }
   1362 
   1363 /*
   1364  *	pmap_clear_reference:
   1365  *
   1366  *	Clear the reference bit on the specified physical page.
   1367  */
   1368 bool
   1369 pmap_clear_reference(struct vm_page *pg)
   1370 {
   1371 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1372 
   1373 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1374 	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR"))",
   1375 	   pg, VM_PAGE_TO_PHYS(pg), 0,0);
   1376 
   1377 	bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);
   1378 
   1379 	UVMHIST_LOG(pmaphist, "<- %s", rv ? "true" : "false", 0,0,0);
   1380 
   1381 	return rv;
   1382 }
   1383 
   1384 /*
   1385  *	pmap_is_referenced:
   1386  *
   1387  *	Return whether or not the specified physical page is referenced
   1388  *	by any physical maps.
   1389  */
   1390 bool
   1391 pmap_is_referenced(struct vm_page *pg)
   1392 {
   1393 
   1394 	return VM_PAGEMD_REFERENCED_P(VM_PAGE_TO_MD(pg));
   1395 }
   1396 
   1397 /*
   1398  *	Clear the modify bits on the specified physical page.
   1399  */
   1400 bool
   1401 pmap_clear_modify(struct vm_page *pg)
   1402 {
   1403 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1404 	pv_entry_t pv = &mdpg->mdpg_first;
   1405 	pv_entry_t pv_next;
   1406 	uint16_t gen;
   1407 
   1408 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1409 	UVMHIST_LOG(pmaphist, "(pg=%p (%#"PRIxPADDR"))",
   1410 	    pg, VM_PAGE_TO_PHYS(pg), 0,0);
   1411 	PMAP_COUNT(clear_modify);
   1412 
   1413 	if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
   1414 		if (pv->pv_pmap == NULL) {
   1415 			UVMHIST_LOG(pmapexechist,
   1416 			    "pg %p (pa %#"PRIxPADDR"): %s",
   1417 			    pg, VM_PAGE_TO_PHYS(pg), "execpage cleared", 0);
   1418 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1419 			PMAP_COUNT(exec_uncached_clear_modify);
   1420 		} else {
   1421 			UVMHIST_LOG(pmapexechist,
   1422 			    "pg %p (pa %#"PRIxPADDR"): %s",
   1423 			    pg, VM_PAGE_TO_PHYS(pg), "syncicache performed", 0);
   1424 			pmap_page_syncicache(pg);
   1425 			PMAP_COUNT(exec_synced_clear_modify);
   1426 		}
   1427 	}
   1428 	if (!pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED)) {
   1429 		UVMHIST_LOG(pmaphist, "<- false", 0,0,0,0);
   1430 		return false;
   1431 	}
   1432 	if (pv->pv_pmap == NULL) {
   1433 		UVMHIST_LOG(pmaphist, "<- true (no mappings)", 0,0,0,0);
   1434 		return true;
   1435 	}
   1436 
   1437 	/*
   1438 	 * remove write access from any pages that are dirty
   1439 	 * so we can tell if they are written to again later.
   1440 	 * flush the VAC first if there is one.
   1441 	 */
   1442 	kpreempt_disable();
   1443 	gen = VM_PAGEMD_PVLIST_LOCK(mdpg, false);
   1444 	for (; pv != NULL; pv = pv_next) {
   1445 		pmap_t pmap = pv->pv_pmap;
   1446 		vaddr_t va = pv->pv_va;
   1447 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1448 		KASSERT(ptep);
   1449 		pv_next = pv->pv_next;
   1450 		pt_entry_t pt_entry = pte_prot_nowrite(*ptep);
   1451 		if (*ptep == pt_entry) {
   1452 			continue;
   1453 		}
   1454 		pmap_md_vca_clean(pg, va, PMAP_WBINV);
   1455 		pmap_md_tlb_miss_lock_enter();
   1456 		*ptep = pt_entry;
   1457 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1458 		pmap_tlb_invalidate_addr(pmap, va);
   1459 		pmap_md_tlb_miss_lock_exit();
   1460 		pmap_update(pmap);
   1461 		if (__predict_false(gen != VM_PAGEMD_PVLIST_LOCK(mdpg, false))) {
   1462 			/*
   1463 			 * The list changed!  So restart from the beginning.
   1464 			 */
   1465 			pv_next = &mdpg->mdpg_first;
   1466 		}
   1467 	}
   1468 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1469 	kpreempt_enable();
   1470 
   1471 	UVMHIST_LOG(pmaphist, "<- true (mappings changed)", 0,0,0,0);
   1472 	return true;
   1473 }
   1474 
   1475 /*
   1476  *	pmap_is_modified:
   1477  *
   1478  *	Return whether or not the specified physical page is modified
   1479  *	by any physical maps.
   1480  */
   1481 bool
   1482 pmap_is_modified(struct vm_page *pg)
   1483 {
   1484 
   1485 	return VM_PAGEMD_MODIFIED_P(VM_PAGE_TO_MD(pg));
   1486 }
   1487 
   1488 /*
   1489  *	pmap_set_modified:
   1490  *
   1491  *	Sets the page modified reference bit for the specified page.
   1492  */
   1493 void
   1494 pmap_set_modified(paddr_t pa)
   1495 {
   1496 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1497 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1498 	pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
   1499 }
   1500 
   1501 /******************** pv_entry management ********************/
   1502 
   1503 static void
   1504 pmap_check_pvlist(struct vm_page *pg)
   1505 {
   1506 #ifdef PARANOIADIAG
   1507 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
    1508 	pv_entry_t pv = &mdpg->mdpg_first;
   1509 	if (pv->pv_pmap != NULL) {
   1510 		for (; pv != NULL; pv = pv->pv_next) {
   1511 			KASSERT(!pmap_md_direct_mapped_vaddr_p(pv->pv_va));
   1512 		}
   1513 	}
   1514 #endif /* PARANOIADIAG */
   1515 }
   1516 
   1517 /*
   1518  * Enter the pmap and virtual address into the
   1519  * physical to virtual map table.
   1520  */
   1521 void
   1522 pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, u_int *npte)
   1523 {
   1524 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1525 	pv_entry_t pv, npv, apv;
    1526 	uint16_t gen;
   1527 	bool first __unused = false;
   1528 
   1529 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1530 	UVMHIST_LOG(pmaphist,
   1531 	    "(pmap=%p va=%#"PRIxVADDR" pg=%p (%#"PRIxPADDR")",
   1532 	    pmap, va, pg, VM_PAGE_TO_PHYS(pg));
   1533 	UVMHIST_LOG(pmaphist, "nptep=%p (%#x))", npte, *npte, 0, 0);
   1534 
   1535 	KASSERT(kpreempt_disabled());
   1536 	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
   1537 
   1538 	apv = NULL;
   1539 	pv = &mdpg->mdpg_first;
   1540 	gen = VM_PAGEMD_PVLIST_LOCK(mdpg, true);
   1541 	pmap_check_pvlist(pg);
   1542 again:
   1543 	if (pv->pv_pmap == NULL) {
   1544 		KASSERT(pv->pv_next == NULL);
   1545 		/*
   1546 		 * No entries yet, use header as the first entry
   1547 		 */
   1548 		PMAP_COUNT(primary_mappings);
   1549 		PMAP_COUNT(mappings);
   1550 		first = true;
   1551 #ifdef __PMAP_VIRTUAL_CACHE_ALIASES
   1552 		pmap_page_clear_attributes(pg, VM_PAGEMD_UNCACHED);
   1553 #endif
   1554 		pv->pv_pmap = pmap;
   1555 		pv->pv_va = va;
   1556 	} else {
   1557 		if (pmap_md_vca_add(pg, va, npte))
   1558 			goto again;
   1559 
   1560 		/*
   1561 		 * There is at least one other VA mapping this page.
   1562 		 * Place this entry after the header.
   1563 		 *
   1564 		 * Note: the entry may already be in the table if
   1565 		 * we are only changing the protection bits.
   1566 		 */
   1567 
   1568 #ifdef PARANOIADIAG
   1569 		const paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1570 #endif
   1571 		for (npv = pv; npv; npv = npv->pv_next) {
   1572 			if (pmap == npv->pv_pmap && va == npv->pv_va) {
   1573 #ifdef PARANOIADIAG
   1574 				pt_entry_t *ptep = pmap_pte_lookup(pmap, va);
   1575 				pt_entry_t pt_entry = (ptep ? *ptep : 0);
   1576 				if (!pte_valid_p(pt_entry)
   1577 				    || pte_to_paddr(pt_entry) != pa)
   1578 					printf(
   1579 		"pmap_enter_pv: found va %#"PRIxVADDR" pa %#"PRIxPADDR" in pv_table but != %x\n",
   1580 					    va, pa, pt_entry);
   1581 #endif
   1582 				PMAP_COUNT(remappings);
   1583 				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1584 				if (__predict_false(apv != NULL))
   1585 					pmap_pv_free(apv);
   1586 				return;
   1587 			}
   1588 		}
   1589 		if (__predict_true(apv == NULL)) {
   1590 			/*
   1591 			 * To allocate a PV entry we must release the PVLIST
   1592 			 * lock, remembering the page generation it was taken
   1593 			 * with.  Allocate the PV, then reacquire the lock.
   1594 			 */
   1595 			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1596 
   1597 			apv = (pv_entry_t)pmap_pv_alloc();
   1598 			if (apv == NULL)
   1599 				panic("pmap_enter_pv: pmap_pv_alloc() failed");
   1600 
   1601 			/*
   1602 			 * If the generation has changed, then someone else
   1603 			 * tinkered with this page so we should
   1604 			 * start over.
   1605 			 */
   1606 			uint16_t oldgen = gen;
   1607 			gen = VM_PAGEMD_PVLIST_LOCK(mdpg, true);
   1608 			if (gen != oldgen)
   1609 				goto again;
   1610 		}
   1611 		npv = apv;
   1612 		apv = NULL;
   1613 		npv->pv_va = va;
   1614 		npv->pv_pmap = pmap;
   1615 		npv->pv_next = pv->pv_next;
   1616 		pv->pv_next = npv;
   1617 		PMAP_COUNT(mappings);
   1618 	}
   1619 	pmap_check_pvlist(pg);
   1620 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1621 	if (__predict_false(apv != NULL))
   1622 		pmap_pv_free(apv);
   1623 
   1624 	UVMHIST_LOG(pmaphist, "<- done pv=%p%s",
   1625 	    pv, first ? " (first pv)" : "",0,0);
   1626 }
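        /*
         * In outline, the lock-generation protocol used above when a new
         * pv_entry must be allocated (an illustrative sketch using this
         * file's own macros, not additional code to compile):
         *
         *	gen = VM_PAGEMD_PVLIST_LOCK(mdpg, true);   -- lock, note gen
         *	... scan list: no match, and no spare PV entry ...
         *	VM_PAGEMD_PVLIST_UNLOCK(mdpg);             -- unlock to allocate
         *	apv = (pv_entry_t)pmap_pv_alloc();         -- may sleep
         *	gen = VM_PAGEMD_PVLIST_LOCK(mdpg, true);   -- relock, new gen
         *	if (gen != oldgen)
         *		goto again;                        -- list changed, rescan
         *
         * Any change to the pv list while the lock was dropped bumps the
         * generation, so a mismatch forces a rescan before the preallocated
         * entry is linked in.
         */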
   1627 
   1628 /*
   1629  * Remove a physical to virtual address translation.
   1630  * If cache was inhibited on this page, and there are no more cache
   1631  * conflicts, restore caching.
   1632  * Flush the cache if the last mapping of the page is removed (the page
   1633  * should always be cached at this point).
   1634  */
   1635 void
   1636 pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty)
   1637 {
   1638 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1639 	pv_entry_t pv, npv;
   1640 	bool last;
   1641 
   1642 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1643 	UVMHIST_LOG(pmaphist,
   1644 	    "(pmap=%p va=%#"PRIxVADDR" pg=%p (pa %#"PRIxPADDR")\n",
   1645 	    pmap, va, pg, VM_PAGE_TO_PHYS(pg));
   1646 	UVMHIST_LOG(pmaphist, "dirty=%s)", dirty ? "true" : "false", 0,0,0);
   1647 
   1648 	KASSERT(kpreempt_disabled());
   1649 	pv = &mdpg->mdpg_first;
   1650 
   1651 	(void)VM_PAGEMD_PVLIST_LOCK(mdpg, true);
   1652 	pmap_check_pvlist(pg);
   1653 
   1654 	/*
   1655 	 * If it is the first entry on the list, it is actually
   1656 	 * in the header and we must copy the following entry up
   1657 	 * to the header.  Otherwise we must search the list for
   1658 	 * the entry.  In either case we free the now unused entry.
   1659 	 */
   1660 
   1661 	last = false;
   1662 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
   1663 		npv = pv->pv_next;
   1664 		if (npv) {
   1665 			*pv = *npv;
   1666 			KASSERT(pv->pv_pmap != NULL);
   1667 		} else {
   1668 #ifdef __PMAP_VIRTUAL_CACHE_ALIASES
   1669 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
   1670 #endif
   1671 			pv->pv_pmap = NULL;
   1672 			last = true;	/* Last mapping removed */
   1673 		}
   1674 		PMAP_COUNT(remove_pvfirst);
   1675 	} else {
   1676 		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
   1677 			PMAP_COUNT(remove_pvsearch);
   1678 			if (pmap == npv->pv_pmap && va == npv->pv_va)
   1679 				break;
   1680 		}
   1681 		if (npv) {
   1682 			pv->pv_next = npv->pv_next;
   1683 		}
   1684 	}
   1685 	pmap_md_vca_remove(pg, va);
   1686 
   1687 	pmap_check_pvlist(pg);
   1688 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1689 
   1690 	/*
   1691 	 * Free the pv_entry if needed.
   1692 	 */
   1693 	if (npv)
   1694 		pmap_pv_free(npv);
   1695 	if (VM_PAGEMD_EXECPAGE_P(mdpg) && dirty) {
   1696 		if (last) {
   1697 			/*
   1698 			 * If this was the page's last mapping, we no longer
   1699 			 * care about its execness.
   1700 			 */
   1701 			UVMHIST_LOG(pmapexechist,
   1702 			    "pg %p (pa %#"PRIxPADDR")%s: %s",
   1703 			    pg, VM_PAGE_TO_PHYS(pg),
   1704 			    last ? " [last mapping]" : "",
   1705 			    "execpage cleared");
   1706 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1707 			PMAP_COUNT(exec_uncached_remove);
   1708 		} else {
   1709 			/*
   1710 			 * Someone still has it mapped as an executable page
   1711 			 * so we must sync it.
   1712 			 */
   1713 			UVMHIST_LOG(pmapexechist,
   1714 			    "pg %p (pa %#"PRIxPADDR")%s: %s",
   1715 			    pg, VM_PAGE_TO_PHYS(pg),
   1716 			    last ? " [last mapping]" : "",
   1717 			    "performed syncicache");
   1718 			pmap_page_syncicache(pg);
   1719 			PMAP_COUNT(exec_synced_remove);
   1720 		}
   1721 	}
   1722 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
   1723 }
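        /*
         * Note that the first pv_entry is embedded in the vm_page_md itself,
         * so removing it copies its successor up rather than unlinking it.
         * For example, removing mapping A from a page with two mappings:
         *
         *	before:	mdpg_first = [A] -> [B] -> NULL
         *	after:	mdpg_first = [B] -> NULL	(old [B] entry freed)
         *
         * Removing the sole remaining mapping instead just clears pv_pmap in
         * the header, which is what marks the list empty ("last" above).
         */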
   1724 
   1725 #if defined(MULTIPROCESSOR)
   1726 struct pmap_pvlist_info {
   1727 	kmutex_t *pli_locks[PAGE_SIZE / 32];
   1728 	volatile u_int pli_lock_refs[PAGE_SIZE / 32];
   1729 	volatile u_int pli_lock_index;
   1730 	u_int pli_lock_mask;
   1731 } pmap_pvlist_info;
   1732 
   1733 void
   1734 pmap_pvlist_lock_init(size_t cache_line_size)
   1735 {
   1736 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
   1737 	const vaddr_t lock_page = uvm_pageboot_alloc(PAGE_SIZE);
   1738 	vaddr_t lock_va = lock_page;
   1739 	if (sizeof(kmutex_t) > cache_line_size) {
   1740 		cache_line_size = roundup2(sizeof(kmutex_t), cache_line_size);
   1741 	}
   1742 	const size_t nlocks = PAGE_SIZE / cache_line_size;
   1743 	KASSERT((nlocks & (nlocks - 1)) == 0);
   1744 	/*
   1745 	 * Now divide the page into a number of mutexes, one per cache line.
   1746 	 */
   1747 	for (size_t i = 0; i < nlocks; lock_va += cache_line_size, i++) {
   1748 		kmutex_t * const lock = (kmutex_t *)lock_va;
   1749 		mutex_init(lock, MUTEX_DEFAULT, IPL_VM);
   1750 		pli->pli_locks[i] = lock;
   1751 	}
   1752 	pli->pli_lock_mask = nlocks - 1;
   1753 }
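        /*
         * For example, with PAGE_SIZE == 4096, cache_line_size == 64 and
         * sizeof(kmutex_t) <= 64, the loop above initializes
         * nlocks == 4096 / 64 == 64 mutexes, each on its own cache line,
         * and pli_lock_mask == 63 is used by pmap_pvlist_lock() below to
         * pick one of them.
         */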
   1754 
   1755 uint16_t
   1756 pmap_pvlist_lock(struct vm_page_md *mdpg, bool list_change)
   1757 {
   1758 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
   1759 	kmutex_t *lock = mdpg->mdpg_lock;
   1760 	uint16_t gen;
   1761 
   1762 	/*
   1763 	 * Allocate a lock on an as-needed basis.  This will hopefully give us
   1764 	 * semi-random distribution not based on page color.
   1765 	 */
   1766 	if (__predict_false(lock == NULL)) {
   1767 		size_t locknum = atomic_add_int_nv(&pli->pli_lock_index, 37);
   1768 		size_t lockid = locknum & pli->pli_lock_mask;
   1769 		kmutex_t * const new_lock = pli->pli_locks[lockid];
   1770 		/*
   1771 		 * Set the lock.  If some other thread already did, just use
   1772 		 * the one they assigned.
   1773 		 */
   1774 		lock = atomic_cas_ptr(&mdpg->mdpg_lock, NULL, new_lock);
   1775 		if (lock == NULL) {
   1776 			lock = new_lock;
   1777 			atomic_inc_uint(&pli->pli_lock_refs[lockid]);
   1778 		}
   1779 	}
   1780 
   1781 	/*
   1782 	 * Now finally lock the pvlists.
   1783 	 */
   1784 	mutex_spin_enter(lock);
   1785 
   1786 	/*
   1787 	 * If the locker will be changing the list, increment the generation
   1788 	 * number kept in the high 16 bits of attrs.
   1789 	 */
   1790 	gen = VM_PAGEMD_PVLIST_GEN(mdpg);		/* get old value */
   1791 	if (list_change)
   1792 		atomic_add_int(&mdpg->mdpg_attrs, 0x10000);
   1793 
   1794 	/*
   1795 	 * Return the generation number.
   1796 	 */
   1797 	return gen;
   1798 }
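        /*
         * mdpg_attrs therefore does double duty: the low 16 bits hold the
         * page attribute flags and the high 16 bits hold the pv-list
         * generation, so the atomic_add_int() of 0x10000 above advances
         * the generation without disturbing the attributes.  E.g. (with
         * illustrative flag values) attrs == 0x0005000c would become
         * 0x0006000c: generation 5 -> 6, flags 0xc untouched.
         */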
   1799 #else /* !MULTIPROCESSOR */
   1800 void
   1801 pmap_pvlist_lock_init(size_t cache_line_size)
   1802 {
   1803 	mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_VM);
   1804 }
   1805 
   1806 #ifdef MODULAR
   1807 uint16_t
   1808 pmap_pvlist_lock(struct vm_page_md *mdpg, bool list_change)
   1809 {
   1810 	/*
   1811 	 * We just use a global lock.
   1812 	 */
   1813 	if (__predict_false(mdpg->mdpg_lock == NULL)) {
   1814 		mdpg->mdpg_lock = &pmap_pvlist_mutex;
   1815 	}
   1816 
   1817 	/*
   1818 	 * Now finally lock the pvlists.
   1819 	 */
   1820 	mutex_spin_enter(mdpg->mdpg_lock);
   1821 
   1822 	return 0;
   1823 }
   1824 #endif /* MODULAR */
   1825 #endif /* !MULTIPROCESSOR */
   1826 
   1827 /*
   1828  * pmap_pv_page_alloc:
   1829  *
   1830  *	Allocate a page for the pv_entry pool.
   1831  */
   1832 void *
   1833 pmap_pv_page_alloc(struct pool *pp, int flags)
   1834 {
   1835 	struct vm_page *pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE);
   1836 	if (pg == NULL)
   1837 		return NULL;
   1838 
   1839 	return (void *)pmap_map_poolpage(VM_PAGE_TO_PHYS(pg));
   1840 }
   1841 
   1842 /*
   1843  * pmap_pv_page_free:
   1844  *
   1845  *	Free a pv_entry pool page.
   1846  */
   1847 void
   1848 pmap_pv_page_free(struct pool *pp, void *v)
   1849 {
   1850 	vaddr_t va = (vaddr_t)v;
   1851 
   1852 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
   1853 	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   1854 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1855 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1856 	pmap_md_vca_remove(pg, va);
   1857 	pmap_page_clear_attributes(mdpg, VM_PAGEMD_POOLPAGE);
   1858 	uvm_pagefree(pg);
   1859 }
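        /*
         * Both halves of this pool backend rely on the MD direct map: the
         * allocator hands back a direct-mapped KVA for the new page, and
         * the free path above recovers the vm_page from that KVA.  A usage
         * sketch (as the pool(9) allocator would call them):
         *
         *	void *v = pmap_pv_page_alloc(pp, 0);	-- direct-mapped KVA
         *	...
         *	pmap_pv_page_free(pp, v);
         */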
   1860 
   1861 #ifdef PMAP_PREFER
   1862 /*
   1863  * Find first virtual address >= *vap that doesn't cause
   1864  * a cache alias conflict.
   1865  */
   1866 void
   1867 pmap_prefer(vaddr_t foff, vaddr_t *vap, vsize_t sz, int td)
   1868 {
   1869 	vaddr_t	va;
   1870 	vsize_t d;
   1871 	vsize_t prefer_mask = ptoa(uvmexp.colormask);
   1872 
   1873 	PMAP_COUNT(prefer_requests);
   1874 
   1875 	prefer_mask |= pmap_md_cache_prefer_mask();
   1876 
   1877 	if (prefer_mask) {
   1878 		va = *vap;
   1879 
   1880 		d = foff - va;
   1881 		d &= prefer_mask;
   1882 		if (d) {
   1883 			if (td)
   1884 				*vap = trunc_page(va - ((-d) & prefer_mask));
   1885 			else
   1886 				*vap = round_page(va + d);
   1887 			PMAP_COUNT(prefer_adjustments);
   1888 		}
   1889 	}
   1890 }
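        /*
         * A worked example of the adjustment above, with illustrative
         * values (4 KiB pages, prefer_mask == 0x7000, i.e. eight colors),
         * foff == 0x5000, *vap == 0x230000 and td == 0:
         *
         *	d = (0x5000 - 0x230000) & 0x7000 = 0x5000
         *	*vap = round_page(0x230000 + 0x5000) = 0x235000
         *
         * so (*vap & prefer_mask) == (foff & prefer_mask): the chosen
         * address has the same cache color as the object offset and will
         * not alias other mappings laid out the same way.
         */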
   1891 #endif /* PMAP_PREFER */
   1892 
   1893 #ifdef PMAP_MAP_POOLPAGE
   1894 vaddr_t
   1895 pmap_map_poolpage(paddr_t pa)
   1896 {
   1897 
   1898 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1899 	KASSERT(pg);
   1900 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1901 	pmap_page_set_attributes(mdpg, VM_PAGEMD_POOLPAGE);
   1902 
   1903 	const vaddr_t va = pmap_md_map_poolpage(pa, NBPG);
   1904 	pmap_md_vca_add(pg, va, NULL);
   1905 	return va;
   1906 }
   1907 
   1908 paddr_t
   1909 pmap_unmap_poolpage(vaddr_t va)
   1910 {
   1911 
   1912 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
   1913 	paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   1914 
   1915 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1916 	KASSERT(pg);
   1917 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1918 	pmap_page_clear_attributes(mdpg, VM_PAGEMD_POOLPAGE);
   1919 	pmap_md_unmap_poolpage(va, NBPG);
   1920 	pmap_md_vca_remove(pg, va);
   1921 
   1922 	return pa;
   1923 }
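        /*
         * The two routines above are meant to be used as a matched pair,
         * e.g. (sketch):
         *
         *	struct vm_page *pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE);
         *	vaddr_t va = pmap_map_poolpage(VM_PAGE_TO_PHYS(pg));
         *	... use the direct-mapped page at va ...
         *	paddr_t pa = pmap_unmap_poolpage(va);
         *	KASSERT(pa == VM_PAGE_TO_PHYS(pg));
         */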
   1924 #endif /* PMAP_MAP_POOLPAGE */
   1925