/*	$NetBSD: pmap.c,v 1.14 2016/07/07 06:55:44 msaitoh Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.14 2016/07/07 06:55:44 msaitoh Exp $");

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced-protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include "opt_modular.h"
#include "opt_multiprocessor.h"
#include "opt_sysv.h"

#define __PMAP_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/pool.h>
#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/socketvar.h>	/* XXX: for sock_loan_thresh */

#include <uvm/uvm.h>

#define	PMAP_COUNT(name)	(pmap_evcnt_##name.ev_count++ + 0)
#define PMAP_COUNTER(name, desc) \
static struct evcnt pmap_evcnt_##name = \
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \
EVCNT_ATTACH_STATIC(pmap_evcnt_##name)

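/*
 * Event counters.  Each PMAP_COUNTER() use below instantiates a static
 * evcnt(9) counter that is attached automatically at boot via a link
 * set; the counts can be inspected at run time with "vmstat -e".
 */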
PMAP_COUNTER(remove_kernel_calls, "remove kernel calls");
PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped");
PMAP_COUNTER(remove_user_calls, "remove user calls");
PMAP_COUNTER(remove_user_pages, "user pages unmapped");
PMAP_COUNTER(remove_flushes, "remove cache flushes");
PMAP_COUNTER(remove_tlb_ops, "remove tlb ops");
PMAP_COUNTER(remove_pvfirst, "remove pv first");
PMAP_COUNTER(remove_pvsearch, "remove pv search");

PMAP_COUNTER(prefer_requests, "prefer requests");
PMAP_COUNTER(prefer_adjustments, "prefer adjustments");

PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
PMAP_COUNTER(zeroed_pages, "pages zeroed");
PMAP_COUNTER(copied_pages, "pages copied");

PMAP_COUNTER(kenter_pa, "kernel fast mapped pages");
PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)");
PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages");
PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages");

PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable");
PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable");

PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)");
PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)");
PMAP_COUNTER(kernel_mappings, "kernel pages mapped");
PMAP_COUNTER(user_mappings, "user pages mapped");
PMAP_COUNTER(user_mappings_changed, "user mapping changed");
PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed");
PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
PMAP_COUNTER(managed_mappings, "managed pages mapped");
PMAP_COUNTER(mappings, "pages mapped");
PMAP_COUNTER(remappings, "pages remapped");
PMAP_COUNTER(unmappings, "pages unmapped");
PMAP_COUNTER(primary_mappings, "page initial mappings");
PMAP_COUNTER(primary_unmappings, "page final unmappings");
PMAP_COUNTER(tlb_hit, "page mapping");

PMAP_COUNTER(exec_mappings, "exec pages mapped");
PMAP_COUNTER(exec_synced_mappings, "exec pages synced");
PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)");
PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)");
PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)");
PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)");
PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)");
PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)");
PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)");
PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)");
PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)");

PMAP_COUNTER(create, "creates");
PMAP_COUNTER(reference, "references");
PMAP_COUNTER(dereference, "dereferences");
PMAP_COUNTER(destroy, "destroyed");
PMAP_COUNTER(activate, "activations");
PMAP_COUNTER(deactivate, "deactivations");
PMAP_COUNTER(update, "updates");
#ifdef MULTIPROCESSOR
PMAP_COUNTER(shootdown_ipis, "shootdown IPIs");
#endif
PMAP_COUNTER(unwire, "unwires");
PMAP_COUNTER(copy, "copies");
PMAP_COUNTER(clear_modify, "clear_modifies");
PMAP_COUNTER(protect, "protects");
PMAP_COUNTER(page_protect, "page_protects");

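/*
 * ASID 0 is reserved: the TLB management code never assigns it to a
 * user pmap, so it can stand for "no ASID allocated".  The CTASSERT
 * below pins down that assumption.
 */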
#define PMAP_ASID_RESERVED 0
CTASSERT(PMAP_ASID_RESERVED == 0);

/*
 * Initialize the kernel pmap.
 */
#ifdef MULTIPROCESSOR
#define	PMAP_SIZE	offsetof(struct pmap, pm_pai[PMAP_TLB_MAX])
#else
#define	PMAP_SIZE	sizeof(struct pmap)
kmutex_t pmap_pvlist_mutex __aligned(COHERENCY_UNIT);
#endif

struct pmap_kernel kernel_pmap_store = {
	.kernel_pmap = {
		.pm_count = 1,
		.pm_segtab = PMAP_INVALID_SEGTAB_ADDRESS,
		.pm_minaddr = VM_MIN_KERNEL_ADDRESS,
		.pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
	},
};

struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;

struct pmap_limits pmap_limits = {
	.virtual_start = VM_MIN_KERNEL_ADDRESS,
};

#ifdef UVMHIST
static struct kern_history_ent pmapexechistbuf[10000];
static struct kern_history_ent pmaphistbuf[10000];
UVMHIST_DEFINE(pmapexechist);
UVMHIST_DEFINE(pmaphist);
#endif

/*
 * The pools from which pmap structures and sub-structures are allocated.
 */
struct pool pmap_pmap_pool;
struct pool pmap_pv_pool;

#ifndef PMAP_PV_LOWAT
#define	PMAP_PV_LOWAT	16
#endif
int		pmap_pv_lowat = PMAP_PV_LOWAT;

bool		pmap_initialized = false;
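/*
 * Cache-color check for virtually-indexed caches: a physical page may
 * be mapped at a virtual address without risk of an alias only if the
 * two addresses agree in the bits selected by pmap_page_colormask
 * (a mask of zero means no aliasing, so every pairing is acceptable).
 */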
#define	PMAP_PAGE_COLOROK_P(a, b) \
		((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0)
u_int		pmap_page_colormask;

#define PAGE_IS_MANAGED(pa)	\
	(pmap_initialized == true && vm_physseg_find(atop(pa), NULL) != -1)

#define PMAP_IS_ACTIVE(pm)						\
	((pm) == pmap_kernel() ||					\
	 (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)

/* Forward function declarations */
void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, u_int *);

/*
 * PV table management functions.
 */
void	*pmap_pv_page_alloc(struct pool *, int);
void	pmap_pv_page_free(struct pool *, void *);

struct pool_allocator pmap_pv_page_allocator = {
	pmap_pv_page_alloc, pmap_pv_page_free, 0,
};

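/*
 * pv entries are allocated PR_NOWAIT: pmap_enter_pv() runs with
 * preemption disabled, so the pool allocator must not sleep here.
 */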
#define	pmap_pv_alloc()		pool_get(&pmap_pv_pool, PR_NOWAIT)
#define	pmap_pv_free(pv)	pool_put(&pmap_pv_pool, (pv))

#if !defined(MULTIPROCESSOR) || !defined(PMAP_MD_NEED_TLB_MISS_LOCK)
#define	pmap_md_tlb_miss_lock_enter()	do { } while(/*CONSTCOND*/0)
#define	pmap_md_tlb_miss_lock_exit()	do { } while(/*CONSTCOND*/0)
#endif	/* !MULTIPROCESSOR || !PMAP_MD_NEED_TLB_MISS_LOCK */

/*
 * Misc. functions.
 */

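/*
 * Atomically clear the given bits in a page's attribute word.
 * Returns true iff at least one of the requested bits was set,
 * i.e. the call actually changed something.
 */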
bool
pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes)
{
	volatile u_int * const attrp = &mdpg->mdpg_attrs;
#ifdef MULTIPROCESSOR
	for (;;) {
		u_int old_attr = *attrp;
		if ((old_attr & clear_attributes) == 0)
			return false;
		u_int new_attr = old_attr & ~clear_attributes;
		if (old_attr == atomic_cas_uint(attrp, old_attr, new_attr))
			return true;
	}
#else
	u_int old_attr = *attrp;
	if ((old_attr & clear_attributes) == 0)
		return false;
	*attrp &= ~clear_attributes;
	return true;
#endif
}

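/*
 * Atomically set the given bits in a page's attribute word.
 */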
void
pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes)
{
#ifdef MULTIPROCESSOR
	atomic_or_uint(&mdpg->mdpg_attrs, set_attributes);
#else
	mdpg->mdpg_attrs |= set_attributes;
#endif
}

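/*
 * Synchronize the instruction cache for an executable page: walk the
 * page's PV list to gather the set of CPUs that currently have one of
 * its pmaps on-proc, then hand that set to the machine-dependent
 * syncicache hook.
 */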
static void
pmap_page_syncicache(struct vm_page *pg)
{
#ifndef MULTIPROCESSOR
	struct pmap * const curpmap = curcpu()->ci_curpm;
#endif
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv = &mdpg->mdpg_first;
	kcpuset_t *onproc;
#ifdef MULTIPROCESSOR
	kcpuset_create(&onproc, true);
#else
	onproc = NULL;
#endif
	(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);

	if (pv->pv_pmap != NULL) {
		for (; pv != NULL; pv = pv->pv_next) {
#ifdef MULTIPROCESSOR
			kcpuset_merge(onproc, pv->pv_pmap->pm_onproc);
			if (kcpuset_match(onproc, kcpuset_running)) {
				break;
			}
#else
			if (pv->pv_pmap == curpmap) {
				onproc = curcpu()->ci_data.cpu_kcpuset;
				break;
			}
#endif
		}
	}
	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	kpreempt_disable();
	pmap_md_page_syncicache(pg, onproc);
#ifdef MULTIPROCESSOR
	kcpuset_destroy(onproc);
#endif
	kpreempt_enable();
}

/*
 * Define the initial bounds of the kernel virtual address space.
 */
void
pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
{

	*vstartp = pmap_limits.virtual_start;
	*vendp = pmap_limits.virtual_end;
}

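/*
 * Grow the kernel virtual address space to cover maxkvaddr, reserving
 * PTE pages one segment at a time.  Returns the new end of KVA, which
 * is clamped to VM_MAX_KERNEL_ADDRESS.
 */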
vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
	vaddr_t virtual_end = pmap_limits.virtual_end;
	maxkvaddr = pmap_round_seg(maxkvaddr) - 1;

	/*
	 * Reserve PTEs for the new KVA space.
	 */
	for (; virtual_end < maxkvaddr; virtual_end += NBSEG) {
		pmap_pte_reserve(pmap_kernel(), virtual_end, 0);
	}

	/*
	 * Don't exceed VM_MAX_KERNEL_ADDRESS!
	 */
	if (virtual_end == 0 || virtual_end > VM_MAX_KERNEL_ADDRESS)
		virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Update new end.
	 */
	pmap_limits.virtual_end = virtual_end;
	return virtual_end;
}

/*
 * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
 * This function allows for early dynamic memory allocation until the virtual
 * memory system has been bootstrapped.  After that point, either kmem_alloc
 * or malloc should be used.  This function works by stealing pages from the
 * (to be) managed page pool, then implicitly mapping the pages (by using
 * their k0seg addresses) and zeroing them.
 *
 * It may be used once the physical memory segments have been pre-loaded
 * into the vm_physmem[] array.  Early memory allocation MUST use this
 * interface!  This cannot be used after vm_page_startup(), and will
 * generate a panic if tried.
 *
 * Note that this memory will never be freed, and in essence it is wired
 * down.
 *
 * We must adjust *vstartp and/or *vendp iff we use address space
 * from the kernel virtual address range defined by pmap_virtual_space().
 */
vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
	u_int npgs;
	paddr_t pa;
	vaddr_t va;

	size = round_page(size);
	npgs = atop(size);

	for (u_int bank = 0; bank < vm_nphysseg; bank++) {
		struct vm_physseg * const seg = VM_PHYSMEM_PTR(bank);
		if (uvm.page_init_done == true)
			panic("pmap_steal_memory: called _after_ bootstrap");

		if (seg->avail_start != seg->start ||
		    seg->avail_start >= seg->avail_end)
			continue;

		if ((seg->avail_end - seg->avail_start) < npgs)
			continue;

		/*
		 * There are enough pages here; steal them!
		 */
		pa = ptoa(seg->avail_start);
		seg->avail_start += npgs;
		seg->start += npgs;

		/*
		 * Have we used up this segment?
		 */
		if (seg->avail_start == seg->end) {
			if (vm_nphysseg == 1)
				panic("pmap_steal_memory: out of memory!");

			/* Remove this segment from the list. */
			vm_nphysseg--;
			if (bank < vm_nphysseg)
				memmove(seg, seg+1,
				    sizeof(*seg) * (vm_nphysseg - bank));
		}

		va = pmap_md_map_poolpage(pa, size);
		memset((void *)va, 0, size);
		return va;
	}

	/*
	 * If we got here, there was no memory left.
	 */
	panic("pmap_steal_memory: no memory to steal");
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{
	UVMHIST_INIT_STATIC(pmapexechist, pmapexechistbuf);
	UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);

	/*
	 * Initialize the segtab lock.
	 */
	mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH);

	/*
	 * Set a low water mark on the pv_entry pool, so that we are
	 * more likely to have these around even in extreme memory
	 * starvation.
	 */
	pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);

	pmap_md_init();

	/*
	 * Now it is safe to enable pv entry recording.
	 */
	pmap_initialized = true;
}

/*
 *	Create and return a physical map which may be referenced by
 *	the hardware.  The new map contains no valid mappings.
 */
pmap_t
pmap_create(void)
{
	pmap_t pmap;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	PMAP_COUNT(create);

	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
	memset(pmap, 0, PMAP_SIZE);

	KASSERT(pmap->pm_pai[0].pai_link.le_prev == NULL);

	pmap->pm_count = 1;
	pmap->pm_minaddr = VM_MIN_ADDRESS;
	pmap->pm_maxaddr = VM_MAXUSER_ADDRESS;

	pmap_segtab_init(pmap);

#ifdef MULTIPROCESSOR
	kcpuset_create(&pmap->pm_active, true);
	kcpuset_create(&pmap->pm_onproc, true);
#endif

	UVMHIST_LOG(pmaphist, "<- pmap %p", pmap,0,0,0);
	return pmap;
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0,0,0);

	if (atomic_dec_uint_nv(&pmap->pm_count) > 0) {
		PMAP_COUNT(dereference);
		return;
	}

	KASSERT(pmap->pm_count == 0);
	PMAP_COUNT(destroy);
	kpreempt_disable();
	pmap_md_tlb_miss_lock_enter();
	pmap_tlb_asid_release_all(pmap);
	pmap_segtab_destroy(pmap, NULL, 0);
	pmap_md_tlb_miss_lock_exit();

#ifdef MULTIPROCESSOR
	kcpuset_destroy(pmap->pm_active);
	kcpuset_destroy(pmap->pm_onproc);
#endif

	pool_put(&pmap_pmap_pool, pmap);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0,0,0);
	PMAP_COUNT(reference);

	if (pmap != NULL) {
		atomic_inc_uint(&pmap->pm_count);
	}

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}

/*
 *	Make a new pmap (vmspace) active for the given process.
 */
void
pmap_activate(struct lwp *l)
{
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(l=%p (pmap=%p))", l, pmap, 0,0);
	PMAP_COUNT(activate);

	kpreempt_disable();
	pmap_md_tlb_miss_lock_enter();
	pmap_tlb_asid_acquire(pmap, l);
	if (l == curlwp) {
		pmap_segtab_activate(pmap, l);
	}
	pmap_md_tlb_miss_lock_exit();
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}

/*
 *	Make a previously active pmap (vmspace) inactive.
 */
void
pmap_deactivate(struct lwp *l)
{
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(l=%p (pmap=%p))", l, pmap, 0,0);
	PMAP_COUNT(deactivate);

	kpreempt_disable();
	pmap_md_tlb_miss_lock_enter();
	curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
	pmap_tlb_asid_deactivate(pmap);
	pmap_md_tlb_miss_lock_exit();
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}

void
pmap_update(struct pmap *pmap)
{

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0,0,0);
	PMAP_COUNT(update);

	kpreempt_disable();
#if defined(MULTIPROCESSOR) && defined(PMAP_NEED_TLB_SHOOTDOWN)
	u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0);
	if (pending && pmap_tlb_shootdown_bystanders(pmap))
		PMAP_COUNT(shootdown_ipis);
#endif
	pmap_md_tlb_miss_lock_enter();
#if defined(DEBUG) && !defined(MULTIPROCESSOR)
	pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
#endif /* DEBUG */

	/*
	 * If pmap_remove_all was called, we deactivated ourselves and nuked
	 * our ASID.  Now we have to reactivate ourselves.
	 */
	if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) {
		pmap->pm_flags ^= PMAP_DEFERRED_ACTIVATE;
		pmap_tlb_asid_acquire(pmap, curlwp);
		pmap_segtab_activate(pmap, curlwp);
	}
	pmap_md_tlb_miss_lock_exit();
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */

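/*
 * Per-segment callback for pmap_remove(), invoked via
 * pmap_pte_process(): invalidate every valid PTE in [sva, eva),
 * update the statistics, and take each mapped page off its PV list.
 */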
static bool
pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
	uintptr_t flags)
{
	const pt_entry_t npte = flags;
	const bool is_kernel_pmap_p = (pmap == pmap_kernel());

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%p %sva=%"PRIxVADDR"..%"PRIxVADDR,
	    pmap, (is_kernel_pmap_p ? "(kernel) " : ""), sva, eva);
	UVMHIST_LOG(pmaphist, "ptep=%p, flags(npte)=%#"PRIxPTR")",
	    ptep, flags, 0, 0);

	KASSERT(kpreempt_disabled());

	for (; sva < eva; sva += NBPG, ptep++) {
		pt_entry_t pt_entry = *ptep;
		if (!pte_valid_p(pt_entry))
			continue;
		if (is_kernel_pmap_p)
			PMAP_COUNT(remove_kernel_pages);
		else
			PMAP_COUNT(remove_user_pages);
		if (pte_wired_p(pt_entry))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;
		struct vm_page *pg = PHYS_TO_VM_PAGE(pte_to_paddr(pt_entry));
		if (__predict_true(pg != NULL)) {
			pmap_remove_pv(pmap, sva, pg,
			   pte_modified_p(pt_entry));
		}
		pmap_md_tlb_miss_lock_enter();
		*ptep = npte;
		/*
		 * Flush the TLB for the given address.
		 */
		pmap_tlb_invalidate_addr(pmap, sva);
		pmap_md_tlb_miss_lock_exit();
	}
	return false;
}

void
pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
{
	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
	const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%p, va=%#"PRIxVADDR"..%#"PRIxVADDR")",
	    pmap, sva, eva, 0);

	if (is_kernel_pmap_p)
		PMAP_COUNT(remove_kernel_calls);
	else
		PMAP_COUNT(remove_user_calls);
#ifdef PARANOIADIAG
	if (sva < pmap->pm_minaddr || eva > pmap->pm_maxaddr)
		panic("%s: va range %#"PRIxVADDR"-%#"PRIxVADDR" not in range",
		    __func__, sva, eva - 1);
	if (PMAP_IS_ACTIVE(pmap)) {
		struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu());
		uint32_t asid = tlb_get_asid();
		if (asid != pai->pai_asid) {
			panic("%s: inconsistency for active TLB flush"
			    ": %d <-> %d", __func__, asid, pai->pai_asid);
		}
	}
#endif
	kpreempt_disable();
	pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}

/*
 *	pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv;
	vaddr_t va;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR") prot=%#x)",
	    pg, VM_PAGE_TO_PHYS(pg), prot, 0);
	PMAP_COUNT(page_protect);

	switch (prot) {
	case VM_PROT_READ|VM_PROT_WRITE:
	case VM_PROT_ALL:
		break;

	/* copy_on_write */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
		pv = &mdpg->mdpg_first;
		/*
		 * Loop over all current mappings setting/clearing as appropriate.
		 */
		if (pv->pv_pmap != NULL) {
			while (pv != NULL) {
				const pmap_t pmap = pv->pv_pmap;
				const uint16_t gen = VM_PAGEMD_PVLIST_GEN(mdpg);
				va = pv->pv_va;
				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
				pmap_protect(pmap, va, va + PAGE_SIZE, prot);
				KASSERT(pv->pv_pmap == pmap);
				pmap_update(pmap);
				if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg, false)) {
					pv = &mdpg->mdpg_first;
				} else {
					pv = pv->pv_next;
				}
			}
		}
		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
		break;

	/* remove_all */
	default:
		/*
		 * Do this first so that for each unmapping, pmap_remove_pv
		 * won't try to sync the icache.
		 */
		if (pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE)) {
			UVMHIST_LOG(pmapexechist, "pg %p (pa %#"PRIxPADDR
			    "): execpage cleared", pg, VM_PAGE_TO_PHYS(pg),0,0);
			PMAP_COUNT(exec_uncached_page_protect);
		}
		(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
		pv = &mdpg->mdpg_first;
		while (pv->pv_pmap != NULL) {
			const pmap_t pmap = pv->pv_pmap;
			va = pv->pv_va;
			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
			pmap_remove(pmap, va, va + PAGE_SIZE);
			pmap_update(pmap);
			(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
		}
		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	}

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}

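/*
 * Per-segment callback for pmap_protect(), invoked via
 * pmap_pte_process(): downgrade the protection of every valid PTE in
 * [sva, eva), first syncing the icache for any modified executable
 * page so no stale instructions survive the downgrade.
 */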
static bool
pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
	uintptr_t flags)
{
	const vm_prot_t prot = (flags & VM_PROT_ALL);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%p %sva=%"PRIxVADDR"..%"PRIxVADDR,
	    pmap, (pmap == pmap_kernel() ? "(kernel) " : ""), sva, eva);
	UVMHIST_LOG(pmaphist, "ptep=%p, flags(npte)=%#"PRIxPTR")",
	    ptep, flags, 0, 0);

	KASSERT(kpreempt_disabled());
	/*
	 * Change protection on every valid mapping within this segment.
	 */
	for (; sva < eva; sva += NBPG, ptep++) {
		pt_entry_t pt_entry = *ptep;
		if (!pte_valid_p(pt_entry))
			continue;
		struct vm_page * const pg =
		    PHYS_TO_VM_PAGE(pte_to_paddr(pt_entry));
		if (pg != NULL && pte_modified_p(pt_entry)) {
			struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
			pmap_md_vca_clean(pg, sva, PMAP_WBINV);
			if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
				KASSERT(mdpg->mdpg_first.pv_pmap != NULL);
				if (pte_cached_p(pt_entry)) {
					UVMHIST_LOG(pmapexechist,
					    "pg %p (pa %#"PRIxPADDR"): %s",
					    pg, VM_PAGE_TO_PHYS(pg),
					    "syncicached performed", 0);
					pmap_page_syncicache(pg);
					PMAP_COUNT(exec_synced_protect);
				}
			}
		}
		pt_entry = pte_prot_downgrade(pt_entry, prot);
		if (*ptep != pt_entry) {
			pmap_md_tlb_miss_lock_enter();
			*ptep = pt_entry;
			/*
			 * Update the TLB if needed.
			 */
			pmap_tlb_update_addr(pmap, sva, pt_entry,
			    PMAP_TLB_NEED_IPI);
			pmap_md_tlb_miss_lock_exit();
		}
	}
	return false;
}

/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist,
	    "  pmap=%p, va=%#"PRIxVADDR"..%#"PRIxVADDR" prot=%#x)",
	    pmap, sva, eva, prot);
	PMAP_COUNT(protect);

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
		return;
	}

#ifdef PARANOIADIAG
	if (sva < pmap->pm_minaddr || eva > pmap->pm_maxaddr)
		panic("%s: va range %#"PRIxVADDR"-%#"PRIxVADDR" not in range",
		    __func__, sva, eva - 1);
	if (PMAP_IS_ACTIVE(pmap)) {
		struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu());
		uint32_t asid = tlb_get_asid();
		if (asid != pai->pai_asid) {
			panic("%s: inconsistency for active TLB update"
			    ": %d <-> %d", __func__, asid, pai->pai_asid);
		}
	}
#endif

	/*
	 * Change protection on every valid mapping within this segment.
	 */
	kpreempt_disable();
	pmap_pte_process(pmap, sva, eva, pmap_pte_protect, prot);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}

#if defined(__PMAP_VIRTUAL_CACHE_ALIASES)
/*
 *	pmap_page_cache:
 *
 *	Change all mappings of a managed page to cached/uncached.
 */
static void
pmap_page_cache(struct vm_page *pg, bool cached)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR") cached=%s)",
	    pg, VM_PAGE_TO_PHYS(pg), cached ? "true" : "false", 0);
	KASSERT(kpreempt_disabled());

	if (cached) {
		pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
		PMAP_COUNT(page_cache_restorations);
	} else {
		pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED);
		PMAP_COUNT(page_cache_evictions);
	}

	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
	KASSERT(kpreempt_disabled());
	for (pv_entry_t pv = &mdpg->mdpg_first;
	     pv != NULL;
	     pv = pv->pv_next) {
		pmap_t pmap = pv->pv_pmap;
		vaddr_t va = pv->pv_va;

		KASSERT(pmap != NULL);
		KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		if (ptep == NULL)
			continue;
		pt_entry_t pt_entry = *ptep;
		if (pte_valid_p(pt_entry)) {
			pt_entry = pte_cached_change(pt_entry, cached);
			pmap_md_tlb_miss_lock_enter();
			*ptep = pt_entry;
			pmap_tlb_update_addr(pmap, va, pt_entry,
			    PMAP_TLB_NEED_IPI);
			pmap_md_tlb_miss_lock_exit();
		}
	}
	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}
#endif	/* __PMAP_VIRTUAL_CACHE_ALIASES */

/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
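/*
 * Typical usage (a sketch, not code from this file): a fault handler
 * establishing a readable, wired mapping might do
 *
 *	error = pmap_enter(map->pmap, va, VM_PAGE_TO_PHYS(pg),
 *	    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
 *	if (error == 0)
 *		pmap_update(map->pmap);
 *
 * where flags carries the access type that caused the fault plus
 * modifiers such as PMAP_WIRED or PMAP_NOCACHE, and pmap_update()
 * makes the change visible before it is relied upon.
 */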
int
pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	pt_entry_t npte;
	const bool wired = (flags & PMAP_WIRED) != 0;
	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
#ifdef UVMHIST
	struct kern_history * const histp =
	    ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist);
#endif

	UVMHIST_FUNC(__func__);
#define VM_PROT_STRING(prot) \
	&"\0    (R)\0  (W)\0  (RW)\0 (X)\0  (RX)\0 (WX)\0 (RWX)\0"[UVM_PROTECTION(prot)*6]
	UVMHIST_CALLED(*histp);
	UVMHIST_LOG(*histp, "(pmap=%p, va=%#"PRIxVADDR", pa=%#"PRIxPADDR,
	    pmap, va, pa, 0);
	UVMHIST_LOG(*histp, "prot=%#x%s flags=%#x%s)",
	    prot, VM_PROT_STRING(prot), flags, VM_PROT_STRING(flags));

	const bool good_color = PMAP_PAGE_COLOROK_P(pa, va);
	if (is_kernel_pmap_p) {
		PMAP_COUNT(kernel_mappings);
		if (!good_color)
			PMAP_COUNT(kernel_mappings_bad);
	} else {
		PMAP_COUNT(user_mappings);
		if (!good_color)
			PMAP_COUNT(user_mappings_bad);
	}
#if defined(DEBUG) || defined(DIAGNOSTIC) || defined(PARANOIADIAG)
	if (va < pmap->pm_minaddr || va >= pmap->pm_maxaddr)
		panic("%s: %s %#"PRIxVADDR" too big",
		    __func__, is_kernel_pmap_p ? "kva" : "uva", va);
#endif

	KASSERTMSG(prot & VM_PROT_READ,
	    "%s: no READ (%#x) in prot %#x", __func__, VM_PROT_READ, prot);

	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
	struct vm_page_md *mdpg;

	if (pg) {
		mdpg = VM_PAGE_TO_MD(pg);
		/* Set page referenced/modified status based on flags */
		if (flags & VM_PROT_WRITE)
			pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
		else if (flags & VM_PROT_ALL)
			pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);

#ifdef __PMAP_VIRTUAL_CACHE_ALIASES
		if (!VM_PAGEMD_CACHED(pg))
			flags |= PMAP_NOCACHE;
#endif

		PMAP_COUNT(managed_mappings);
	} else {
		/*
		 * Assumption: if it is not part of our managed memory
		 * then it must be device memory which may be volatile.
		 */
		mdpg = NULL;
		flags |= PMAP_NOCACHE;
		PMAP_COUNT(unmanaged_mappings);
	}

	npte = pte_make_enter(pa, mdpg, prot, flags, is_kernel_pmap_p);

	kpreempt_disable();
	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags);
	if (__predict_false(ptep == NULL)) {
		kpreempt_enable();
		UVMHIST_LOG(*histp, "<- ENOMEM", 0,0,0,0);
		return ENOMEM;
	}
	pt_entry_t opte = *ptep;

	/* Done after case that may sleep/return. */
	if (pg)
		pmap_enter_pv(pmap, va, pg, &npte);

	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * hardware pages comprising a single VM page.
	 */
   1065   1.1  christos 	if (wired) {
   1066   1.1  christos 		pmap->pm_stats.wired_count++;
   1067   1.1  christos 		npte = pte_wire_entry(npte);
   1068   1.1  christos 	}
   1069   1.1  christos 
   1070   1.1  christos 	UVMHIST_LOG(*histp, "new pte %#x (pa %#"PRIxPADDR")", npte, pa, 0,0);
   1071   1.1  christos 
   1072   1.1  christos 	if (pte_valid_p(opte) && pte_to_paddr(opte) != pa) {
   1073   1.1  christos 		pmap_remove(pmap, va, va + NBPG);
   1074   1.1  christos 		PMAP_COUNT(user_mappings_changed);
   1075   1.1  christos 	}
   1076   1.1  christos 
   1077   1.1  christos 	KASSERT(pte_valid_p(npte));
   1078   1.1  christos 	bool resident = pte_valid_p(opte);
   1079   1.1  christos 	if (!resident)
   1080   1.1  christos 		pmap->pm_stats.resident_count++;
   1081  1.10    nonaka 	pmap_md_tlb_miss_lock_enter();
   1082   1.1  christos 	*ptep = npte;
   1083   1.1  christos 
   1084   1.1  christos 	pmap_tlb_update_addr(pmap, va, npte,
   1085   1.1  christos 	    ((flags & VM_PROT_ALL) ? PMAP_TLB_INSERT : 0)
   1086   1.1  christos 	    | (resident ? PMAP_TLB_NEED_IPI : 0));
   1087  1.10    nonaka 	pmap_md_tlb_miss_lock_exit();
   1088   1.1  christos 	kpreempt_enable();
   1089   1.1  christos 
   1090   1.1  christos 	if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) {
   1091   1.1  christos 		KASSERT(mdpg != NULL);
   1092   1.1  christos 		PMAP_COUNT(exec_mappings);
   1093   1.1  christos 		if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) {
   1094   1.1  christos 			if (!pte_deferred_exec_p(npte)) {
   1095   1.1  christos 				UVMHIST_LOG(*histp,
   1096   1.1  christos 				    "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
   1097   1.1  christos 				    va, pg, "immediate", "");
   1098   1.1  christos 				pmap_page_syncicache(pg);
   1099   1.1  christos 				pmap_page_set_attributes(mdpg,
   1100   1.1  christos 				    VM_PAGEMD_EXECPAGE);
   1101   1.1  christos 				PMAP_COUNT(exec_synced_mappings);
   1102   1.1  christos 			} else {
   1103   1.1  christos 				UVMHIST_LOG(*histp, "va=%#"PRIxVADDR
   1104   1.1  christos 				    " pg %p: %s syncicache: pte %#x",
   1105   1.1  christos 				    va, pg, "defer", npte);
   1106   1.1  christos 			}
   1107   1.1  christos 		} else {
   1108   1.1  christos 			UVMHIST_LOG(*histp,
   1109   1.1  christos 			    "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
   1110   1.1  christos 			    va, pg, "no",
   1111   1.1  christos 			    (pte_cached_p(npte)
   1112   1.1  christos 				? " (already exec)"
   1113   1.1  christos 				: " (uncached)"));
   1114   1.1  christos 		}
   1115   1.1  christos 	} else if (pg != NULL && (prot & VM_PROT_EXECUTE)) {
   1116   1.1  christos 		KASSERT(mdpg != NULL);
   1117   1.1  christos 		KASSERT(prot & VM_PROT_WRITE);
   1118   1.1  christos 		PMAP_COUNT(exec_mappings);
   1119   1.1  christos 		pmap_page_syncicache(pg);
   1120   1.1  christos 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1121   1.1  christos 		UVMHIST_LOG(pmapexechist,
   1122   1.1  christos 		    "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
   1123   1.1  christos 		    va, pg, "immediate", " (writeable)");
   1124   1.1  christos 	}
   1125   1.1  christos 
   1126   1.1  christos 	if (prot & VM_PROT_EXECUTE) {
   1127   1.1  christos 		UVMHIST_LOG(pmapexechist, "<- 0 (OK)", 0,0,0,0);
   1128   1.1  christos 	} else {
   1129   1.1  christos 		UVMHIST_LOG(pmaphist, "<- 0 (OK)", 0,0,0,0);
   1130   1.1  christos 	}
   1131   1.1  christos 	return 0;
   1132   1.1  christos }
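
                         /*
                          * Illustrative call (a sketch, not part of the original source):
                          * a typical caller maps a user text page read/execute and lets the
                          * deferred-syncicache logic above decide when to flush the icache.
                          * PMAP_CANFAIL permits an ENOMEM return instead of a panic:
                          *
                          *	int error = pmap_enter(pmap, va, pa,
                          *	    VM_PROT_READ | VM_PROT_EXECUTE, PMAP_CANFAIL);
                          *	if (error != 0)
                          *		... back off, wait for memory, and retry ...
                          */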
   1133   1.1  christos 
   1134   1.1  christos void
   1135   1.1  christos pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1136   1.1  christos {
   1137   1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1138   1.1  christos 	struct vm_page_md *mdpg;
   1139   1.1  christos 
   1140   1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1141   1.1  christos 	UVMHIST_LOG(pmaphist, "(va=%#"PRIxVADDR" pa=%#"PRIxPADDR
   1142   1.1  christos 	    ", prot=%#x, flags=%#x)", va, pa, prot, flags);
   1143   1.1  christos 	PMAP_COUNT(kenter_pa);
   1144   1.1  christos 
   1145   1.1  christos 	if (pg == NULL) {
   1146   1.1  christos 		mdpg = NULL;
   1147   1.1  christos 		PMAP_COUNT(kenter_pa_unmanaged);
   1148   1.1  christos 		flags |= PMAP_NOCACHE;
   1149   1.1  christos 	} else {
   1150   1.1  christos 		mdpg = VM_PAGE_TO_MD(pg);
   1151   1.1  christos 	}
   1152   1.1  christos 
   1153   1.1  christos 	if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va))
   1154   1.1  christos 		PMAP_COUNT(kenter_pa_bad);
   1155   1.1  christos 
   1156   1.1  christos 	const pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags);
   1157   1.1  christos 	kpreempt_disable();
   1158   1.1  christos 	pt_entry_t * const ptep = pmap_pte_reserve(pmap_kernel(), va, 0);
   1159   1.1  christos 	KASSERT(ptep != NULL);
   1160   1.1  christos 	KASSERT(!pte_valid_p(*ptep));
   1161  1.10    nonaka 	pmap_md_tlb_miss_lock_enter();
   1162   1.1  christos 	*ptep = npte;
   1163   1.1  christos 	/*
   1164   1.1  christos 	 * We have the option to force this mapping into the TLB but we
   1165   1.1  christos 	 * don't.  Instead let the next reference to the page do it.
   1166   1.1  christos 	 */
   1167   1.1  christos 	pmap_tlb_update_addr(pmap_kernel(), va, npte, 0);
   1168  1.10    nonaka 	pmap_md_tlb_miss_lock_exit();
   1169   1.1  christos 	kpreempt_enable();
   1170   1.1  christos #if DEBUG > 1
   1171   1.1  christos 	for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) {
   1172   1.1  christos 		if (((long *)va)[i] != ((long *)pa)[i])
   1173   1.1  christos 			panic("%s: contents (%lx) of va %#"PRIxVADDR
   1174   1.1  christos 			    " != contents (%lx) of pa %#"PRIxPADDR, __func__,
   1175   1.1  christos 			    ((long *)va)[i], va, ((long *)pa)[i], pa);
   1176   1.1  christos 	}
   1177   1.1  christos #endif
   1178   1.1  christos 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
   1179   1.1  christos }
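
                         /*
                          * Illustrative usage (a sketch, not part of the original source):
                          * a driver wanting an uncached window onto a device page might pair
                          * pmap_kenter_pa() with pmap_kremove() roughly as follows; "devpa"
                          * and "devva" are hypothetical names.
                          *
                          *	vaddr_t devva = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
                          *	    UVM_KMF_VAONLY);
                          *	pmap_kenter_pa(devva, devpa, VM_PROT_READ | VM_PROT_WRITE,
                          *	    PMAP_NOCACHE);
                          *	... access the device through devva ...
                          *	pmap_kremove(devva, PAGE_SIZE);
                          *	pmap_update(pmap_kernel());
                          */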
   1180   1.1  christos 
   1181   1.1  christos static bool
   1182   1.1  christos pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
   1183   1.1  christos 	uintptr_t flags)
   1184   1.1  christos {
   1185   1.1  christos 	const pt_entry_t new_pt_entry = pte_nv_entry(true);
   1186   1.1  christos 
   1187   1.1  christos 	KASSERT(kpreempt_disabled());
   1188   1.1  christos 
   1189   1.1  christos 	/*
    1190   1.1  christos 	 * Invalidate every valid mapping within this segment.
   1191   1.1  christos 	 */
   1192   1.1  christos 	for (; sva < eva; sva += NBPG, ptep++) {
   1193   1.1  christos 		pt_entry_t pt_entry = *ptep;
   1194   1.1  christos 		if (!pte_valid_p(pt_entry)) {
   1195   1.1  christos 			continue;
   1196   1.1  christos 		}
   1197   1.1  christos 
   1198   1.1  christos 		PMAP_COUNT(kremove_pages);
   1199   1.1  christos 		struct vm_page * const pg =
   1200   1.1  christos 		    PHYS_TO_VM_PAGE(pte_to_paddr(pt_entry));
   1201   1.1  christos 		if (pg != NULL)
   1202   1.1  christos 			pmap_md_vca_clean(pg, sva, PMAP_WBINV);
   1203   1.1  christos 
   1204  1.10    nonaka 		pmap_md_tlb_miss_lock_enter();
   1205   1.1  christos 		*ptep = new_pt_entry;
   1206   1.1  christos 		pmap_tlb_invalidate_addr(pmap_kernel(), sva);
   1207  1.10    nonaka 		pmap_md_tlb_miss_lock_exit();
   1208   1.1  christos 	}
   1209   1.1  christos 
   1210   1.1  christos 	return false;
   1211   1.1  christos }
   1212   1.1  christos 
   1213   1.1  christos void
   1214   1.1  christos pmap_kremove(vaddr_t va, vsize_t len)
   1215   1.1  christos {
   1216   1.1  christos 	const vaddr_t sva = trunc_page(va);
   1217   1.1  christos 	const vaddr_t eva = round_page(va + len);
   1218   1.1  christos 
   1219   1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1220   1.1  christos 	UVMHIST_LOG(pmaphist, "(va=%#"PRIxVADDR" len=%#"PRIxVSIZE")",
   1221   1.1  christos 	    va, len, 0,0);
   1222   1.1  christos 
   1223   1.1  christos 	kpreempt_disable();
   1224   1.1  christos 	pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0);
   1225   1.1  christos 	kpreempt_enable();
   1226   1.1  christos 
   1227   1.1  christos 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
   1228   1.1  christos }
   1229   1.1  christos 
   1230   1.1  christos void
   1231   1.1  christos pmap_remove_all(struct pmap *pmap)
   1232   1.1  christos {
   1233   1.1  christos 	KASSERT(pmap != pmap_kernel());
   1234   1.1  christos 
   1235   1.1  christos 	kpreempt_disable();
   1236   1.1  christos 	/*
   1237   1.1  christos 	 * Free all of our ASIDs which means we can skip doing all the
   1238   1.1  christos 	 * tlb_invalidate_addrs().
   1239   1.1  christos 	 */
   1240  1.10    nonaka 	pmap_md_tlb_miss_lock_enter();
   1241   1.1  christos 	pmap_tlb_asid_deactivate(pmap);
   1242   1.1  christos 	pmap_tlb_asid_release_all(pmap);
   1243  1.10    nonaka 	pmap_md_tlb_miss_lock_exit();
   1244   1.1  christos 	pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE;
   1245   1.1  christos 
   1246   1.1  christos 	kpreempt_enable();
   1247   1.1  christos }
   1248   1.1  christos 
   1249   1.1  christos /*
   1250   1.1  christos  *	Routine:	pmap_unwire
   1251   1.1  christos  *	Function:	Clear the wired attribute for a map/virtual-address
   1252   1.1  christos  *			pair.
   1253   1.1  christos  *	In/out conditions:
   1254   1.1  christos  *			The mapping must already exist in the pmap.
   1255   1.1  christos  */
   1256   1.1  christos void
   1257   1.1  christos pmap_unwire(pmap_t pmap, vaddr_t va)
   1258   1.1  christos {
   1259   1.1  christos 
   1260   1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1261   1.1  christos 	UVMHIST_LOG(pmaphist, "(pmap=%p va=%#"PRIxVADDR")", pmap, va, 0,0);
   1262   1.1  christos 	PMAP_COUNT(unwire);
   1263   1.1  christos 
   1264   1.1  christos 	/*
   1265   1.1  christos 	 * Don't need to flush the TLB since PG_WIRED is only in software.
   1266   1.1  christos 	 */
   1267   1.1  christos #ifdef PARANOIADIAG
   1268   1.1  christos 	if (va < pmap->pm_minaddr || pmap->pm_maxaddr <= va)
   1269   1.1  christos 		panic("pmap_unwire");
   1270   1.1  christos #endif
   1271   1.1  christos 	kpreempt_disable();
   1272   1.1  christos 	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
    1273   1.1  christos #ifdef DIAGNOSTIC
    1274   1.1  christos 	if (ptep == NULL)
    1275   1.1  christos 		panic("%s: pmap %p va %#"PRIxVADDR" invalid STE",
    1276   1.1  christos 		    __func__, pmap, va);
    1277   1.1  christos #endif
    1278   1.1  christos 	pt_entry_t pt_entry = *ptep;
    1279   1.1  christos 
    1280   1.1  christos #ifdef DIAGNOSTIC
    1281   1.1  christos 	if (!pte_valid_p(pt_entry))
    1282   1.1  christos 		panic("%s: pmap %p va %#"PRIxVADDR" invalid PTE",
    1283   1.1  christos 		    __func__, pmap, va);
    1284   1.1  christos #endif
   1285   1.1  christos 
   1286   1.1  christos 	if (pte_wired_p(pt_entry)) {
   1287  1.10    nonaka 		pmap_md_tlb_miss_lock_enter();
   1288   1.1  christos 		*ptep = pte_unwire_entry(*ptep);
   1289  1.10    nonaka 		pmap_md_tlb_miss_lock_exit();
   1290   1.1  christos 		pmap->pm_stats.wired_count--;
   1291   1.1  christos 	}
   1292   1.1  christos #ifdef DIAGNOSTIC
   1293   1.1  christos 	else {
   1294   1.1  christos 		printf("%s: wiring for pmap %p va %#"PRIxVADDR" unchanged!\n",
   1295   1.1  christos 		    __func__, pmap, va);
   1296   1.1  christos 	}
   1297   1.1  christos #endif
   1298   1.1  christos 	kpreempt_enable();
   1299   1.1  christos }
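
                         /*
                          * Sketch of the usual pairing (illustrative, not from the original
                          * source): a mapping entered with PMAP_WIRED stays resident until
                          * the wiring is dropped; since the wired bit is software-only, no
                          * TLB flush is needed:
                          *
                          *	pmap_enter(pmap, va, pa, VM_PROT_READ,
                          *	    VM_PROT_READ | PMAP_WIRED);
                          *	...
                          *	pmap_unwire(pmap, va);
                          */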
   1300   1.1  christos 
   1301   1.1  christos /*
   1302   1.1  christos  *	Routine:	pmap_extract
   1303   1.1  christos  *	Function:
   1304   1.1  christos  *		Extract the physical page address associated
   1305   1.1  christos  *		with the given map/virtual_address pair.
   1306   1.1  christos  */
   1307   1.1  christos bool
   1308   1.1  christos pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
   1309   1.1  christos {
   1310   1.1  christos 	paddr_t pa;
   1311   1.1  christos 
   1312   1.1  christos 	//UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1313   1.1  christos 	//UVMHIST_LOG(pmaphist, "(pmap=%p va=%#"PRIxVADDR")", pmap, va, 0,0);
   1314   1.1  christos 	if (pmap == pmap_kernel()) {
   1315   1.1  christos 		if (pmap_md_direct_mapped_vaddr_p(va)) {
   1316   1.1  christos 			pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   1317   1.1  christos 			goto done;
   1318   1.1  christos 		}
   1319   1.1  christos 		if (pmap_md_io_vaddr_p(va))
   1320   1.1  christos 			panic("pmap_extract: io address %#"PRIxVADDR"", va);
   1321   1.1  christos 	}
   1322   1.1  christos 	kpreempt_disable();
   1323   1.1  christos 	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1324   1.1  christos 	if (ptep == NULL) {
   1325   1.1  christos 		//UVMHIST_LOG(pmaphist, "<- false (not in segmap)", 0,0,0,0);
   1326   1.1  christos 		kpreempt_enable();
   1327   1.1  christos 		return false;
   1328   1.1  christos 	}
   1329   1.1  christos 	if (!pte_valid_p(*ptep)) {
   1330   1.1  christos 		//UVMHIST_LOG(pmaphist, "<- false (PTE not valid)", 0,0,0,0);
   1331   1.1  christos 		kpreempt_enable();
   1332   1.1  christos 		return false;
   1333   1.1  christos 	}
   1334   1.1  christos 	pa = pte_to_paddr(*ptep) | (va & PGOFSET);
   1335   1.1  christos 	kpreempt_enable();
   1336   1.1  christos done:
   1337   1.1  christos 	if (pap != NULL) {
   1338   1.1  christos 		*pap = pa;
   1339   1.1  christos 	}
   1340   1.1  christos 	//UVMHIST_LOG(pmaphist, "<- true (pa %#"PRIxPADDR")", pa, 0,0,0);
   1341   1.1  christos 	return true;
   1342   1.1  christos }
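
                         /*
                          * Example (a sketch, not from the original source): translating a
                          * kernel virtual address to the physical address behind it; the
                          * return value must be checked since no valid mapping may exist:
                          *
                          *	paddr_t pa;
                          *	if (!pmap_extract(pmap_kernel(), va, &pa))
                          *		panic("%s: va %#"PRIxVADDR" not mapped",
                          *		    __func__, va);
                          */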
   1343   1.1  christos 
   1344   1.1  christos /*
   1345   1.1  christos  *	Copy the range specified by src_addr/len
   1346   1.1  christos  *	from the source map to the range dst_addr/len
   1347   1.1  christos  *	in the destination map.
   1348   1.1  christos  *
   1349   1.1  christos  *	This routine is only advisory and need not do anything.
   1350   1.1  christos  */
   1351   1.1  christos void
   1352   1.1  christos pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
   1353   1.1  christos     vaddr_t src_addr)
   1354   1.1  christos {
   1355   1.1  christos 
   1356   1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1357   1.1  christos 	PMAP_COUNT(copy);
   1358   1.1  christos }
   1359   1.1  christos 
   1360   1.1  christos /*
   1361   1.1  christos  *	pmap_clear_reference:
   1362   1.1  christos  *
   1363   1.1  christos  *	Clear the reference bit on the specified physical page.
   1364   1.1  christos  */
   1365   1.1  christos bool
   1366   1.1  christos pmap_clear_reference(struct vm_page *pg)
   1367   1.1  christos {
   1368   1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1369   1.1  christos 
   1370   1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1371   1.1  christos 	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR"))",
   1372   1.1  christos 	   pg, VM_PAGE_TO_PHYS(pg), 0,0);
   1373   1.1  christos 
   1374   1.1  christos 	bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);
   1375   1.1  christos 
   1376   1.1  christos 	UVMHIST_LOG(pmaphist, "<- %s", rv ? "true" : "false", 0,0,0);
   1377   1.1  christos 
   1378   1.1  christos 	return rv;
   1379   1.1  christos }
   1380   1.1  christos 
   1381   1.1  christos /*
   1382   1.1  christos  *	pmap_is_referenced:
   1383   1.1  christos  *
   1384   1.1  christos  *	Return whether or not the specified physical page is referenced
   1385   1.1  christos  *	by any physical maps.
   1386   1.1  christos  */
   1387   1.1  christos bool
   1388   1.1  christos pmap_is_referenced(struct vm_page *pg)
   1389   1.1  christos {
   1390   1.1  christos 
   1391   1.1  christos 	return VM_PAGEMD_REFERENCED_P(VM_PAGE_TO_MD(pg));
   1392   1.1  christos }
   1393   1.1  christos 
   1394   1.1  christos /*
   1395   1.1  christos  *	Clear the modify bits on the specified physical page.
   1396   1.1  christos  */
   1397   1.1  christos bool
   1398   1.1  christos pmap_clear_modify(struct vm_page *pg)
   1399   1.1  christos {
   1400   1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1401   1.1  christos 	pv_entry_t pv = &mdpg->mdpg_first;
   1402   1.1  christos 	pv_entry_t pv_next;
   1403   1.1  christos 	uint16_t gen;
   1404   1.1  christos 
   1405   1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1406   1.1  christos 	UVMHIST_LOG(pmaphist, "(pg=%p (%#"PRIxPADDR"))",
   1407   1.1  christos 	    pg, VM_PAGE_TO_PHYS(pg), 0,0);
   1408   1.1  christos 	PMAP_COUNT(clear_modify);
   1409   1.1  christos 
   1410   1.1  christos 	if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
   1411   1.1  christos 		if (pv->pv_pmap == NULL) {
   1412   1.1  christos 			UVMHIST_LOG(pmapexechist,
   1413   1.1  christos 			    "pg %p (pa %#"PRIxPADDR"): %s",
   1414   1.1  christos 			    pg, VM_PAGE_TO_PHYS(pg), "execpage cleared", 0);
   1415   1.1  christos 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1416   1.1  christos 			PMAP_COUNT(exec_uncached_clear_modify);
   1417   1.1  christos 		} else {
   1418   1.1  christos 			UVMHIST_LOG(pmapexechist,
   1419   1.1  christos 			    "pg %p (pa %#"PRIxPADDR"): %s",
   1420   1.1  christos 			    pg, VM_PAGE_TO_PHYS(pg), "syncicache performed", 0);
   1421   1.1  christos 			pmap_page_syncicache(pg);
   1422   1.1  christos 			PMAP_COUNT(exec_synced_clear_modify);
   1423   1.1  christos 		}
   1424   1.1  christos 	}
   1425   1.1  christos 	if (!pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED)) {
   1426   1.1  christos 		UVMHIST_LOG(pmaphist, "<- false", 0,0,0,0);
   1427   1.1  christos 		return false;
   1428   1.1  christos 	}
   1429   1.1  christos 	if (pv->pv_pmap == NULL) {
   1430   1.1  christos 		UVMHIST_LOG(pmaphist, "<- true (no mappings)", 0,0,0,0);
   1431   1.1  christos 		return true;
   1432   1.1  christos 	}
   1433   1.1  christos 
   1434   1.1  christos 	/*
   1435   1.1  christos 	 * remove write access from any pages that are dirty
   1436   1.1  christos 	 * so we can tell if they are written to again later.
   1437   1.1  christos 	 * flush the VAC first if there is one.
   1438   1.1  christos 	 */
   1439   1.1  christos 	kpreempt_disable();
   1440   1.1  christos 	gen = VM_PAGEMD_PVLIST_LOCK(mdpg, false);
   1441   1.1  christos 	for (; pv != NULL; pv = pv_next) {
   1442   1.1  christos 		pmap_t pmap = pv->pv_pmap;
   1443   1.1  christos 		vaddr_t va = pv->pv_va;
   1444   1.1  christos 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1445   1.1  christos 		KASSERT(ptep);
   1446   1.1  christos 		pv_next = pv->pv_next;
   1447   1.1  christos 		pt_entry_t pt_entry = pte_prot_nowrite(*ptep);
   1448   1.1  christos 		if (*ptep == pt_entry) {
   1449   1.1  christos 			continue;
   1450   1.1  christos 		}
   1451   1.1  christos 		pmap_md_vca_clean(pg, va, PMAP_WBINV);
   1452  1.10    nonaka 		pmap_md_tlb_miss_lock_enter();
   1453   1.1  christos 		*ptep = pt_entry;
   1454   1.1  christos 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1455   1.1  christos 		pmap_tlb_invalidate_addr(pmap, va);
   1456  1.10    nonaka 		pmap_md_tlb_miss_lock_exit();
   1457   1.1  christos 		pmap_update(pmap);
   1458   1.1  christos 		if (__predict_false(gen != VM_PAGEMD_PVLIST_LOCK(mdpg, false))) {
   1459   1.1  christos 			/*
   1460   1.1  christos 			 * The list changed!  So restart from the beginning.
   1461   1.1  christos 			 */
   1462   1.1  christos 			pv_next = &mdpg->mdpg_first;
   1463   1.1  christos 		}
   1464   1.1  christos 	}
   1465   1.1  christos 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1466   1.1  christos 	kpreempt_enable();
   1467   1.1  christos 
   1468   1.1  christos 	UVMHIST_LOG(pmaphist, "<- true (mappings changed)", 0,0,0,0);
   1469   1.1  christos 	return true;
   1470   1.1  christos }
   1471   1.1  christos 
   1472   1.1  christos /*
   1473   1.1  christos  *	pmap_is_modified:
   1474   1.1  christos  *
   1475   1.1  christos  *	Return whether or not the specified physical page is modified
   1476   1.1  christos  *	by any physical maps.
   1477   1.1  christos  */
   1478   1.1  christos bool
   1479   1.1  christos pmap_is_modified(struct vm_page *pg)
   1480   1.1  christos {
   1481   1.1  christos 
   1482   1.1  christos 	return VM_PAGEMD_MODIFIED_P(VM_PAGE_TO_MD(pg));
   1483   1.1  christos }
   1484   1.1  christos 
   1485   1.1  christos /*
   1486   1.1  christos  *	pmap_set_modified:
   1487   1.1  christos  *
    1488   1.1  christos  *	Set the modified and referenced attributes on the specified page.
   1489   1.1  christos  */
   1490   1.1  christos void
   1491   1.1  christos pmap_set_modified(paddr_t pa)
   1492   1.1  christos {
   1493   1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1494   1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1495   1.1  christos 	pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
   1496   1.1  christos }
   1497   1.1  christos 
   1498   1.1  christos /******************** pv_entry management ********************/
   1499   1.1  christos 
   1500   1.1  christos static void
   1501   1.1  christos pmap_check_pvlist(struct vm_page *pg)
   1502   1.1  christos {
   1503   1.1  christos #ifdef PARANOIADIAG
   1504   1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
    1505   1.1  christos 	pv_entry_t pv = &mdpg->mdpg_first;
   1506   1.1  christos 	if (pv->pv_pmap != NULL) {
   1507   1.1  christos 		for (; pv != NULL; pv = pv->pv_next) {
   1508   1.1  christos 			KASSERT(!pmap_md_direct_mapped_vaddr_p(pv->pv_va));
   1509   1.1  christos 		}
   1510   1.1  christos 	}
   1511   1.1  christos #endif /* PARANOIADIAG */
   1512   1.1  christos }
   1513   1.1  christos 
   1514   1.1  christos /*
   1515   1.1  christos  * Enter the pmap and virtual address into the
   1516   1.1  christos  * physical to virtual map table.
   1517   1.1  christos  */
   1518   1.1  christos void
    1519   1.1  christos pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, pt_entry_t *npte)
   1520   1.1  christos {
   1521   1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1522   1.1  christos 	pv_entry_t pv, npv, apv;
    1523   1.1  christos 	uint16_t gen;
   1524   1.4    martin 	bool first __unused = false;
   1525   1.1  christos 
   1526   1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1527   1.1  christos 	UVMHIST_LOG(pmaphist,
   1528   1.1  christos 	    "(pmap=%p va=%#"PRIxVADDR" pg=%p (%#"PRIxPADDR")",
   1529   1.1  christos 	    pmap, va, pg, VM_PAGE_TO_PHYS(pg));
   1530   1.1  christos 	UVMHIST_LOG(pmaphist, "nptep=%p (%#x))", npte, *npte, 0, 0);
   1531   1.1  christos 
   1532   1.1  christos 	KASSERT(kpreempt_disabled());
   1533   1.1  christos 	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
   1534   1.1  christos 
   1535   1.1  christos 	apv = NULL;
   1536   1.1  christos 	pv = &mdpg->mdpg_first;
   1537   1.1  christos 	gen = VM_PAGEMD_PVLIST_LOCK(mdpg, true);
   1538   1.1  christos 	pmap_check_pvlist(pg);
   1539   1.1  christos again:
   1540   1.1  christos 	if (pv->pv_pmap == NULL) {
   1541   1.1  christos 		KASSERT(pv->pv_next == NULL);
   1542   1.1  christos 		/*
   1543   1.1  christos 		 * No entries yet, use header as the first entry
   1544   1.1  christos 		 */
   1545   1.1  christos 		PMAP_COUNT(primary_mappings);
   1546   1.1  christos 		PMAP_COUNT(mappings);
   1547   1.1  christos 		first = true;
   1548   1.1  christos #ifdef __PMAP_VIRTUAL_CACHE_ALIASES
    1549   1.1  christos 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
   1550   1.1  christos #endif
   1551   1.1  christos 		pv->pv_pmap = pmap;
   1552   1.1  christos 		pv->pv_va = va;
   1553   1.1  christos 	} else {
   1554   1.1  christos 		if (pmap_md_vca_add(pg, va, npte))
   1555   1.1  christos 			goto again;
   1556   1.1  christos 
   1557   1.1  christos 		/*
   1558   1.1  christos 		 * There is at least one other VA mapping this page.
   1559   1.1  christos 		 * Place this entry after the header.
   1560   1.1  christos 		 *
   1561   1.1  christos 		 * Note: the entry may already be in the table if
   1562   1.1  christos 		 * we are only changing the protection bits.
   1563   1.1  christos 		 */
   1564   1.1  christos 
   1565   1.1  christos #ifdef PARANOIADIAG
   1566   1.1  christos 		const paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1567   1.1  christos #endif
   1568   1.1  christos 		for (npv = pv; npv; npv = npv->pv_next) {
   1569   1.1  christos 			if (pmap == npv->pv_pmap && va == npv->pv_va) {
   1570   1.1  christos #ifdef PARANOIADIAG
   1571   1.1  christos 				pt_entry_t *ptep = pmap_pte_lookup(pmap, va);
   1572   1.1  christos 				pt_entry_t pt_entry = (ptep ? *ptep : 0);
   1573   1.1  christos 				if (!pte_valid_p(pt_entry)
   1574   1.1  christos 				    || pte_to_paddr(pt_entry) != pa)
    1575   1.1  christos 					printf(
    1576   1.1  christos 		"pmap_enter_pv: found va %#"PRIxVADDR" pa %#"PRIxPADDR" in pv_table but pte is %#"PRIxPTE"\n",
    1577   1.1  christos 					    va, pa, pt_entry);
   1578   1.1  christos #endif
   1579   1.1  christos 				PMAP_COUNT(remappings);
   1580   1.1  christos 				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1581   1.1  christos 				if (__predict_false(apv != NULL))
   1582   1.1  christos 					pmap_pv_free(apv);
   1583   1.1  christos 				return;
   1584   1.1  christos 			}
   1585   1.1  christos 		}
   1586   1.1  christos 		if (__predict_true(apv == NULL)) {
   1587   1.1  christos 			/*
   1588   1.1  christos 			 * To allocate a PV, we have to release the PVLIST lock
   1589   1.1  christos 			 * so get the page generation.  We allocate the PV, and
   1590   1.1  christos 			 * then reacquire the lock.
   1591   1.1  christos 			 */
   1592   1.1  christos 			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1593   1.1  christos 
   1594   1.1  christos 			apv = (pv_entry_t)pmap_pv_alloc();
   1595   1.1  christos 			if (apv == NULL)
   1596   1.1  christos 				panic("pmap_enter_pv: pmap_pv_alloc() failed");
   1597   1.1  christos 
   1598   1.1  christos 			/*
   1599   1.1  christos 			 * If the generation has changed, then someone else
   1600   1.1  christos 			 * tinkered with this page so we should
   1601   1.1  christos 			 * start over.
   1602   1.1  christos 			 */
   1603   1.1  christos 			uint16_t oldgen = gen;
   1604   1.1  christos 			gen = VM_PAGEMD_PVLIST_LOCK(mdpg, true);
   1605   1.1  christos 			if (gen != oldgen)
   1606   1.1  christos 				goto again;
   1607   1.1  christos 		}
   1608   1.1  christos 		npv = apv;
   1609   1.1  christos 		apv = NULL;
   1610   1.1  christos 		npv->pv_va = va;
   1611   1.1  christos 		npv->pv_pmap = pmap;
   1612   1.1  christos 		npv->pv_next = pv->pv_next;
   1613   1.1  christos 		pv->pv_next = npv;
   1614   1.1  christos 		PMAP_COUNT(mappings);
   1615   1.1  christos 	}
   1616   1.1  christos 	pmap_check_pvlist(pg);
   1617   1.1  christos 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1618   1.1  christos 	if (__predict_false(apv != NULL))
   1619   1.1  christos 		pmap_pv_free(apv);
   1620   1.1  christos 
   1621   1.1  christos 	UVMHIST_LOG(pmaphist, "<- done pv=%p%s",
   1622   1.1  christos 	    pv, first ? " (first pv)" : "",0,0);
   1623   1.1  christos }
   1624   1.1  christos 
   1625   1.1  christos /*
   1626   1.1  christos  * Remove a physical to virtual address translation.
   1627   1.1  christos  * If cache was inhibited on this page, and there are no more cache
   1628   1.1  christos  * conflicts, restore caching.
   1629   1.1  christos  * Flush the cache if the last page is removed (should always be cached
   1630   1.1  christos  * at this point).
   1631   1.1  christos  */
   1632   1.1  christos void
   1633   1.1  christos pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty)
   1634   1.1  christos {
   1635   1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1636   1.1  christos 	pv_entry_t pv, npv;
   1637   1.1  christos 	bool last;
   1638   1.1  christos 
   1639   1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1640   1.1  christos 	UVMHIST_LOG(pmaphist,
   1641   1.1  christos 	    "(pmap=%p va=%#"PRIxVADDR" pg=%p (pa %#"PRIxPADDR")\n",
   1642   1.1  christos 	    pmap, va, pg, VM_PAGE_TO_PHYS(pg));
   1643   1.1  christos 	UVMHIST_LOG(pmaphist, "dirty=%s)", dirty ? "true" : "false", 0,0,0);
   1644   1.1  christos 
   1645   1.1  christos 	KASSERT(kpreempt_disabled());
   1646   1.1  christos 	pv = &mdpg->mdpg_first;
   1647   1.1  christos 
   1648   1.1  christos 	(void)VM_PAGEMD_PVLIST_LOCK(mdpg, true);
   1649   1.1  christos 	pmap_check_pvlist(pg);
   1650   1.1  christos 
   1651   1.1  christos 	/*
   1652   1.1  christos 	 * If it is the first entry on the list, it is actually
   1653   1.1  christos 	 * in the header and we must copy the following entry up
   1654   1.1  christos 	 * to the header.  Otherwise we must search the list for
   1655   1.1  christos 	 * the entry.  In either case we free the now unused entry.
   1656   1.1  christos 	 */
   1657   1.1  christos 
   1658   1.1  christos 	last = false;
   1659   1.1  christos 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
   1660   1.1  christos 		npv = pv->pv_next;
   1661   1.1  christos 		if (npv) {
   1662   1.1  christos 			*pv = *npv;
   1663   1.1  christos 			KASSERT(pv->pv_pmap != NULL);
   1664   1.1  christos 		} else {
   1665   1.1  christos #ifdef __PMAP_VIRTUAL_CACHE_ALIASES
    1666   1.1  christos 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
   1667   1.1  christos #endif
   1668   1.1  christos 			pv->pv_pmap = NULL;
   1669   1.1  christos 			last = true;	/* Last mapping removed */
   1670   1.1  christos 		}
   1671   1.1  christos 		PMAP_COUNT(remove_pvfirst);
   1672   1.1  christos 	} else {
   1673   1.1  christos 		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
   1674   1.1  christos 			PMAP_COUNT(remove_pvsearch);
   1675   1.1  christos 			if (pmap == npv->pv_pmap && va == npv->pv_va)
   1676   1.1  christos 				break;
   1677   1.1  christos 		}
   1678   1.1  christos 		if (npv) {
   1679   1.1  christos 			pv->pv_next = npv->pv_next;
   1680   1.1  christos 		}
   1681   1.1  christos 	}
   1682   1.1  christos 	pmap_md_vca_remove(pg, va);
   1683   1.1  christos 
   1684   1.1  christos 	pmap_check_pvlist(pg);
   1685   1.1  christos 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1686   1.1  christos 
   1687   1.1  christos 	/*
   1688   1.1  christos 	 * Free the pv_entry if needed.
   1689   1.1  christos 	 */
   1690   1.1  christos 	if (npv)
   1691   1.1  christos 		pmap_pv_free(npv);
   1692   1.1  christos 	if (VM_PAGEMD_EXECPAGE_P(mdpg) && dirty) {
   1693   1.1  christos 		if (last) {
   1694   1.1  christos 			/*
   1695   1.1  christos 			 * If this was the page's last mapping, we no longer
   1696   1.1  christos 			 * care about its execness.
   1697   1.1  christos 			 */
   1698   1.1  christos 			UVMHIST_LOG(pmapexechist,
   1699   1.1  christos 			    "pg %p (pa %#"PRIxPADDR")%s: %s",
   1700   1.1  christos 			    pg, VM_PAGE_TO_PHYS(pg),
   1701   1.1  christos 			    last ? " [last mapping]" : "",
   1702   1.1  christos 			    "execpage cleared");
   1703   1.1  christos 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1704   1.1  christos 			PMAP_COUNT(exec_uncached_remove);
   1705   1.1  christos 		} else {
   1706   1.1  christos 			/*
   1707   1.1  christos 			 * Someone still has it mapped as an executable page
   1708   1.1  christos 			 * so we must sync it.
   1709   1.1  christos 			 */
   1710   1.1  christos 			UVMHIST_LOG(pmapexechist,
   1711   1.1  christos 			    "pg %p (pa %#"PRIxPADDR")%s: %s",
   1712   1.1  christos 			    pg, VM_PAGE_TO_PHYS(pg),
   1713   1.1  christos 			    last ? " [last mapping]" : "",
   1714   1.1  christos 			    "performed syncicache");
   1715   1.1  christos 			pmap_page_syncicache(pg);
   1716   1.1  christos 			PMAP_COUNT(exec_synced_remove);
   1717   1.1  christos 		}
   1718   1.1  christos 	}
   1719   1.1  christos 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
   1720   1.1  christos }
   1721   1.1  christos 
   1722   1.1  christos #if defined(MULTIPROCESSOR)
   1723   1.1  christos struct pmap_pvlist_info {
   1724   1.1  christos 	kmutex_t *pli_locks[PAGE_SIZE / 32];
   1725   1.1  christos 	volatile u_int pli_lock_refs[PAGE_SIZE / 32];
   1726   1.1  christos 	volatile u_int pli_lock_index;
   1727   1.1  christos 	u_int pli_lock_mask;
   1728   1.1  christos } pmap_pvlist_info;
   1729   1.1  christos 
   1730   1.1  christos void
   1731   1.1  christos pmap_pvlist_lock_init(size_t cache_line_size)
   1732   1.1  christos {
   1733   1.1  christos 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
   1734   1.1  christos 	const vaddr_t lock_page = uvm_pageboot_alloc(PAGE_SIZE);
   1735   1.1  christos 	vaddr_t lock_va = lock_page;
   1736   1.1  christos 	if (sizeof(kmutex_t) > cache_line_size) {
   1737   1.1  christos 		cache_line_size = roundup2(sizeof(kmutex_t), cache_line_size);
   1738   1.1  christos 	}
   1739   1.1  christos 	const size_t nlocks = PAGE_SIZE / cache_line_size;
   1740   1.1  christos 	KASSERT((nlocks & (nlocks - 1)) == 0);
   1741   1.1  christos 	/*
   1742   1.1  christos 	 * Now divide the page into a number of mutexes, one per cacheline.
   1743   1.1  christos 	 */
   1744   1.1  christos 	for (size_t i = 0; i < nlocks; lock_va += cache_line_size, i++) {
   1745   1.1  christos 		kmutex_t * const lock = (kmutex_t *)lock_va;
   1746   1.1  christos 		mutex_init(lock, MUTEX_DEFAULT, IPL_VM);
   1747   1.1  christos 		pli->pli_locks[i] = lock;
   1748   1.1  christos 	}
   1749   1.1  christos 	pli->pli_lock_mask = nlocks - 1;
   1750   1.1  christos }
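
                         /*
                          * Worked example (hypothetical numbers, not from the original
                          * source): with PAGE_SIZE = 4096, cache_line_size = 64 and
                          * sizeof(kmutex_t) <= 64, the loop above creates 4096 / 64 = 64
                          * mutexes, one per cache line, and pli_lock_mask becomes 63;
                          * contended pv-list locks therefore never share a cache line.
                          */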
   1751   1.1  christos 
   1752   1.1  christos uint16_t
   1753   1.1  christos pmap_pvlist_lock(struct vm_page_md *mdpg, bool list_change)
   1754   1.1  christos {
   1755   1.1  christos 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
   1756   1.1  christos 	kmutex_t *lock = mdpg->mdpg_lock;
    1757   1.1  christos 	uint16_t gen;
   1758   1.1  christos 
   1759   1.1  christos 	/*
   1760   1.1  christos 	 * Allocate a lock on an as-needed basis.  This will hopefully give us
   1761   1.1  christos 	 * semi-random distribution not based on page color.
   1762   1.1  christos 	 */
   1763   1.1  christos 	if (__predict_false(lock == NULL)) {
   1764   1.1  christos 		size_t locknum = atomic_add_int_nv(&pli->pli_lock_index, 37);
   1765   1.1  christos 		size_t lockid = locknum & pli->pli_lock_mask;
   1766   1.1  christos 		kmutex_t * const new_lock = pli->pli_locks[lockid];
   1767   1.1  christos 		/*
   1768   1.1  christos 		 * Set the lock.  If some other thread already did, just use
   1769   1.1  christos 		 * the one they assigned.
   1770   1.1  christos 		 */
   1771   1.1  christos 		lock = atomic_cas_ptr(&mdpg->mdpg_lock, NULL, new_lock);
   1772   1.1  christos 		if (lock == NULL) {
   1773   1.1  christos 			lock = new_lock;
   1774   1.1  christos 			atomic_inc_uint(&pli->pli_lock_refs[lockid]);
   1775   1.1  christos 		}
   1776   1.1  christos 	}
   1777   1.1  christos 
   1778   1.1  christos 	/*
   1779   1.1  christos 	 * Now finally lock the pvlists.
   1780   1.1  christos 	 */
   1781   1.1  christos 	mutex_spin_enter(lock);
   1782   1.1  christos 
   1783   1.1  christos 	/*
   1784   1.1  christos 	 * If the locker will be changing the list, increment the high 16 bits
   1785   1.1  christos 	 * of attrs so we use that as a generation number.
   1786   1.1  christos 	 */
   1787   1.1  christos 	gen = VM_PAGEMD_PVLIST_GEN(mdpg);		/* get old value */
   1788   1.1  christos 	if (list_change)
   1789   1.1  christos 		atomic_add_int(&mdpg->mdpg_attrs, 0x10000);
   1790   1.1  christos 
   1791   1.1  christos 	/*
   1792   1.1  christos 	 * Return the generation number.
   1793   1.1  christos 	 */
   1794   1.1  christos 	return gen;
   1795   1.1  christos }
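
                         /*
                          * Sketch of the generation protocol (the same pattern
                          * pmap_enter_pv() uses above): a caller that must drop the lock to
                          * allocate memory rechecks the generation and rescans if the list
                          * changed meanwhile:
                          *
                          *	uint16_t gen = VM_PAGEMD_PVLIST_LOCK(mdpg, true);
                          * again:
                          *	... scan the pv list ...
                          *	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
                          *	apv = pmap_pv_alloc();		(may sleep)
                          *	uint16_t oldgen = gen;
                          *	gen = VM_PAGEMD_PVLIST_LOCK(mdpg, true);
                          *	if (gen != oldgen)
                          *		goto again;		(list changed; rescan)
                          */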
   1796   1.1  christos #else /* !MULTIPROCESSOR */
   1797   1.1  christos void
   1798   1.1  christos pmap_pvlist_lock_init(size_t cache_line_size)
   1799   1.1  christos {
   1800   1.1  christos 	mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_VM);
   1801   1.1  christos }
   1802   1.1  christos 
   1803   1.1  christos #ifdef MODULAR
   1804   1.1  christos uint16_t
   1805   1.1  christos pmap_pvlist_lock(struct vm_page_md *mdpg, bool list_change)
   1806   1.1  christos {
   1807   1.1  christos 	/*
   1808   1.1  christos 	 * We just use a global lock.
   1809   1.1  christos 	 */
   1810   1.1  christos 	if (__predict_false(mdpg->mdpg_lock == NULL)) {
   1811   1.1  christos 		mdpg->mdpg_lock = &pmap_pvlist_mutex;
   1812   1.1  christos 	}
   1813   1.1  christos 
   1814   1.1  christos 	/*
   1815   1.1  christos 	 * Now finally lock the pvlists.
   1816   1.1  christos 	 */
   1817   1.1  christos 	mutex_spin_enter(mdpg->mdpg_lock);
   1818   1.1  christos 
   1819   1.1  christos 	return 0;
   1820   1.1  christos }
   1821   1.1  christos #endif /* MODULAR */
   1822   1.1  christos #endif /* !MULTIPROCESSOR */
   1823   1.1  christos 
   1824   1.1  christos /*
   1825   1.1  christos  * pmap_pv_page_alloc:
   1826   1.1  christos  *
   1827   1.1  christos  *	Allocate a page for the pv_entry pool.
   1828   1.1  christos  */
   1829   1.1  christos void *
   1830   1.1  christos pmap_pv_page_alloc(struct pool *pp, int flags)
   1831   1.1  christos {
   1832   1.1  christos 	struct vm_page *pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE);
   1833   1.1  christos 	if (pg == NULL)
   1834   1.1  christos 		return NULL;
   1835   1.1  christos 
   1836   1.1  christos 	return (void *)pmap_map_poolpage(VM_PAGE_TO_PHYS(pg));
   1837   1.1  christos }
   1838   1.1  christos 
   1839   1.1  christos /*
   1840   1.1  christos  * pmap_pv_page_free:
   1841   1.1  christos  *
   1842   1.1  christos  *	Free a pv_entry pool page.
   1843   1.1  christos  */
   1844   1.1  christos void
   1845   1.1  christos pmap_pv_page_free(struct pool *pp, void *v)
   1846   1.1  christos {
   1847   1.1  christos 	vaddr_t va = (vaddr_t)v;
   1848   1.1  christos 
   1849   1.1  christos 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
   1850   1.1  christos 	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   1851   1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1852   1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1853   1.1  christos 	pmap_md_vca_remove(pg, va);
   1854   1.1  christos 	pmap_page_clear_attributes(mdpg, VM_PAGEMD_POOLPAGE);
   1855   1.1  christos 	uvm_pagefree(pg);
   1856   1.1  christos }
   1857   1.1  christos 
   1858   1.1  christos #ifdef PMAP_PREFER
   1859   1.1  christos /*
   1860   1.1  christos  * Find first virtual address >= *vap that doesn't cause
   1861   1.1  christos  * a cache alias conflict.
   1862   1.1  christos  */
   1863   1.1  christos void
   1864   1.1  christos pmap_prefer(vaddr_t foff, vaddr_t *vap, vsize_t sz, int td)
   1865   1.1  christos {
   1866   1.1  christos 	vaddr_t	va;
   1867   1.1  christos 	vsize_t d;
   1868   1.1  christos 	vsize_t prefer_mask = ptoa(uvmexp.colormask);
   1869   1.1  christos 
   1870   1.1  christos 	PMAP_COUNT(prefer_requests);
   1871   1.1  christos 
   1872   1.1  christos 	prefer_mask |= pmap_md_cache_prefer_mask();
   1873   1.1  christos 
   1874   1.1  christos 	if (prefer_mask) {
   1875   1.1  christos 		va = *vap;
   1876   1.1  christos 
   1877   1.1  christos 		d = foff - va;
   1878   1.1  christos 		d &= prefer_mask;
   1879   1.1  christos 		if (d) {
   1880   1.1  christos 			if (td)
    1881   1.1  christos 				*vap = trunc_page(va - ((-d) & prefer_mask));
   1882   1.1  christos 			else
   1883   1.1  christos 				*vap = round_page(va + d);
   1884   1.1  christos 			PMAP_COUNT(prefer_adjustments);
   1885   1.1  christos 		}
   1886   1.1  christos 	}
   1887   1.1  christos }
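
                         /*
                          * Worked example (hypothetical numbers): with prefer_mask = 0x3fff,
                          * foff = 0x5000 and *vap = 0x100000, d = (0x5000 - 0x100000) &
                          * 0x3fff = 0x1000, so the hint is rounded up to 0x101000, which has
                          * the same cache color as foff (both are 0x1000 modulo 0x4000).
                          */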
   1888   1.1  christos #endif /* PMAP_PREFER */
   1889   1.1  christos 
   1890   1.1  christos #ifdef PMAP_MAP_POOLPAGE
   1891   1.1  christos vaddr_t
   1892   1.1  christos pmap_map_poolpage(paddr_t pa)
   1893   1.1  christos {
   1894   1.1  christos 
   1895   1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1896   1.1  christos 	KASSERT(pg);
   1897   1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1898   1.1  christos 	pmap_page_set_attributes(mdpg, VM_PAGEMD_POOLPAGE);
   1899   1.1  christos 
   1900   1.1  christos 	const vaddr_t va = pmap_md_map_poolpage(pa, NBPG);
   1901   1.1  christos 	pmap_md_vca_add(pg, va, NULL);
   1902   1.1  christos 	return va;
   1903   1.1  christos }
   1904   1.1  christos 
   1905   1.1  christos paddr_t
   1906   1.1  christos pmap_unmap_poolpage(vaddr_t va)
   1907   1.1  christos {
   1908   1.1  christos 
   1909   1.1  christos 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
   1910   1.1  christos 	paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   1911   1.1  christos 
   1912   1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1913   1.1  christos 	KASSERT(pg);
   1914   1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1915   1.1  christos 	pmap_page_clear_attributes(mdpg, VM_PAGEMD_POOLPAGE);
   1916   1.1  christos 	pmap_md_unmap_poolpage(va, NBPG);
   1917   1.1  christos 	pmap_md_vca_remove(pg, va);
   1918   1.1  christos 
   1919   1.1  christos 	return pa;
   1920   1.1  christos }
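
                         /*
                          * Illustrative pairing (a sketch): the pool backend maps a freshly
                          * allocated page through the direct map and later reverses it:
                          *
                          *	vaddr_t va = pmap_map_poolpage(VM_PAGE_TO_PHYS(pg));
                          *	... use the page ...
                          *	paddr_t pa = pmap_unmap_poolpage(va);
                          *	KASSERT(pa == VM_PAGE_TO_PHYS(pg));
                          */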
   1921   1.1  christos #endif /* PMAP_MAP_POOLPAGE */
   1922