/*	$NetBSD: pmap.c,v 1.80 2024/05/06 07:18:19 skrll Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.80 2024/05/06 07:18:19 skrll Exp $");

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidations expensive,
 *	this module may delay invalidation or protection-
 *	reduction operations until such time as they are
 *	actually necessary.  This module is given full
 *	information as to which processors are currently
 *	using which maps, and as to when physical maps must
 *	be made correct.
 */
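/*
 * A minimal sketch (illustrative only; "va", "pa", "prot" and "flags"
 * are hypothetical arguments) of how the MI VM code drives this module
 * under the deferred-invalidation rules described above:
 *
 *	pmap_enter(pmap, va, pa, prot, flags);	// TLB work may be deferred
 *	...
 *	pmap_update(pmap);			// commit deferred operations
 */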

#include "opt_ddb.h"
#include "opt_efi.h"
#include "opt_modular.h"
#include "opt_multiprocessor.h"
#include "opt_sysv.h"
#include "opt_uvmhist.h"

#define __PMAP_PRIVATE

#include <sys/param.h>

#include <sys/asan.h>
#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/cpu.h>
#include <sys/mutex.h>
#include <sys/pool.h>

#include <uvm/uvm.h>
#include <uvm/uvm_physseg.h>
#include <uvm/pmap/pmap_pvt.h>

#if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \
    && !defined(PMAP_NO_PV_UNCACHED)
#error PMAP_VIRTUAL_CACHE_ALIASES with MULTIPROCESSOR requires \
 PMAP_NO_PV_UNCACHED to be defined
#endif

#if defined(PMAP_PV_TRACK_ONLY_STUBS)
#undef	__HAVE_PMAP_PV_TRACK
#endif

PMAP_COUNTER(remove_kernel_calls, "remove kernel calls");
PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped");
PMAP_COUNTER(remove_user_calls, "remove user calls");
PMAP_COUNTER(remove_user_pages, "user pages unmapped");
PMAP_COUNTER(remove_flushes, "remove cache flushes");
PMAP_COUNTER(remove_tlb_ops, "remove tlb ops");
PMAP_COUNTER(remove_pvfirst, "remove pv first");
PMAP_COUNTER(remove_pvsearch, "remove pv search");

PMAP_COUNTER(prefer_requests, "prefer requests");
PMAP_COUNTER(prefer_adjustments, "prefer adjustments");

PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");

PMAP_COUNTER(kenter_pa, "kernel fast mapped pages");
PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)");
PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages");
PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages");

PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable");
PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable");

PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)");
PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)");
PMAP_COUNTER(kernel_mappings, "kernel pages mapped");
PMAP_COUNTER(user_mappings, "user pages mapped");
PMAP_COUNTER(user_mappings_changed, "user mapping changed");
PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed");
PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
PMAP_COUNTER(pvtracked_mappings, "pv-tracked unmanaged pages mapped");
PMAP_COUNTER(efirt_mappings, "EFI RT pages mapped");
PMAP_COUNTER(managed_mappings, "managed pages mapped");
PMAP_COUNTER(mappings, "pages mapped");
PMAP_COUNTER(remappings, "pages remapped");
PMAP_COUNTER(unmappings, "pages unmapped");
PMAP_COUNTER(primary_mappings, "page initial mappings");
PMAP_COUNTER(primary_unmappings, "page final unmappings");
PMAP_COUNTER(tlb_hit, "page mapping");

PMAP_COUNTER(exec_mappings, "exec pages mapped");
PMAP_COUNTER(exec_synced_mappings, "exec pages synced");
PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)");
PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)");
PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)");
PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)");
PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)");
PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)");
PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)");
PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)");
PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)");

PMAP_COUNTER(create, "creates");
PMAP_COUNTER(reference, "references");
PMAP_COUNTER(dereference, "dereferences");
PMAP_COUNTER(destroy, "destroyed");
PMAP_COUNTER(activate, "activations");
PMAP_COUNTER(deactivate, "deactivations");
PMAP_COUNTER(update, "updates");
#ifdef MULTIPROCESSOR
PMAP_COUNTER(shootdown_ipis, "shootdown IPIs");
#endif
PMAP_COUNTER(unwire, "unwires");
PMAP_COUNTER(copy, "copies");
PMAP_COUNTER(clear_modify, "clear_modifies");
PMAP_COUNTER(protect, "protects");
PMAP_COUNTER(page_protect, "page_protects");

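/*
 * ASID 0 is reserved and never handed out to a user pmap; the CTASSERT
 * below guards code elsewhere that assumes the reserved value is zero.
 */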
#define PMAP_ASID_RESERVED 0
CTASSERT(PMAP_ASID_RESERVED == 0);

#ifdef PMAP_HWPAGEWALKER
#ifndef PMAP_PDETAB_ALIGN
#define PMAP_PDETAB_ALIGN	/* nothing */
#endif

#ifdef _LP64
pmap_pdetab_t	pmap_kstart_pdetab PMAP_PDETAB_ALIGN; /* first mid-level pdetab for kernel */
#endif
pmap_pdetab_t	pmap_kern_pdetab PMAP_PDETAB_ALIGN;
#endif

#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
#ifndef PMAP_SEGTAB_ALIGN
#define PMAP_SEGTAB_ALIGN	/* nothing */
#endif
#ifdef _LP64
pmap_segtab_t	pmap_kstart_segtab PMAP_SEGTAB_ALIGN; /* first mid-level segtab for kernel */
#endif
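/*
 * On _LP64, the top-level entry covering VM_MIN_KERNEL_ADDRESS is
 * statically wired to the mid-level kernel segtab above, so kernel
 * address lookups work before any dynamic allocation is possible.
 */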
pmap_segtab_t	pmap_kern_segtab PMAP_SEGTAB_ALIGN = { /* top level segtab for kernel */
#ifdef _LP64
	.seg_seg[(VM_MIN_KERNEL_ADDRESS >> XSEGSHIFT) & (NSEGPG - 1)] = &pmap_kstart_segtab,
#endif
};
#endif

struct pmap_kernel kernel_pmap_store = {
	.kernel_pmap = {
		.pm_refcnt = 1,
#ifdef PMAP_HWPAGEWALKER
		.pm_pdetab = PMAP_INVALID_PDETAB_ADDRESS,
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
		.pm_segtab = &pmap_kern_segtab,
#endif
		.pm_minaddr = VM_MIN_KERNEL_ADDRESS,
		.pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
	},
};

struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;

#if defined(EFI_RUNTIME)
static struct pmap efirt_pmap;

pmap_t
pmap_efirt(void)
{
	return &efirt_pmap;
}
#else
static inline pt_entry_t
pte_make_enter_efirt(paddr_t pa, vm_prot_t prot, u_int flags)
{
	panic("not supported");
}
#endif

/* The current top of kernel VM - gets updated by pmap_growkernel */
vaddr_t pmap_curmaxkvaddr;

struct pmap_limits pmap_limits = {	/* VA and PA limits */
	.virtual_start = VM_MIN_KERNEL_ADDRESS,
	.virtual_end = VM_MAX_KERNEL_ADDRESS,
};

#ifdef UVMHIST
static struct kern_history_ent pmapexechistbuf[10000];
static struct kern_history_ent pmaphistbuf[10000];
static struct kern_history_ent pmapxtabhistbuf[5000];
UVMHIST_DEFINE(pmapexechist) = UVMHIST_INITIALIZER(pmapexechist, pmapexechistbuf);
UVMHIST_DEFINE(pmaphist) = UVMHIST_INITIALIZER(pmaphist, pmaphistbuf);
UVMHIST_DEFINE(pmapxtabhist) = UVMHIST_INITIALIZER(pmapxtabhist, pmapxtabhistbuf);
#endif

/*
 * The pools from which pmap structures and sub-structures are allocated.
 */
struct pool pmap_pmap_pool;
struct pool pmap_pv_pool;

#ifndef PMAP_PV_LOWAT
#define	PMAP_PV_LOWAT	16
#endif
int	pmap_pv_lowat = PMAP_PV_LOWAT;

bool	pmap_initialized = false;
#define	PMAP_PAGE_COLOROK_P(a, b) \
		((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0)
u_int	pmap_page_colormask;
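/*
 * PMAP_PAGE_COLOROK_P() above treats two addresses as color-compatible
 * iff they agree in the bits selected by pmap_page_colormask, which is
 * derived from uvmexp.colormask in pmap_init() below.
 */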

#define PAGE_IS_MANAGED(pa)	(pmap_initialized && uvm_pageismanaged(pa))

#define PMAP_IS_ACTIVE(pm)						\
	((pm) == pmap_kernel() ||					\
	 (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)

/* Forward function declarations */
void pmap_page_remove(struct vm_page_md *);
static void pmap_pvlist_check(struct vm_page_md *);
void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
void pmap_enter_pv(pmap_t, vaddr_t, paddr_t, struct vm_page_md *, pt_entry_t *, u_int);

/*
 * PV table management functions.
 */
void	*pmap_pv_page_alloc(struct pool *, int);
void	pmap_pv_page_free(struct pool *, void *);

struct pool_allocator pmap_pv_page_allocator = {
	pmap_pv_page_alloc, pmap_pv_page_free, 0,
};

#define	pmap_pv_alloc()		pool_get(&pmap_pv_pool, PR_NOWAIT)
#define	pmap_pv_free(pv)	pool_put(&pmap_pv_pool, (pv))

#ifndef PMAP_NEED_TLB_MISS_LOCK

#if defined(PMAP_MD_NEED_TLB_MISS_LOCK) || defined(DEBUG)
#define	PMAP_NEED_TLB_MISS_LOCK
#endif /* PMAP_MD_NEED_TLB_MISS_LOCK || DEBUG */

#endif /* PMAP_NEED_TLB_MISS_LOCK */

#ifdef PMAP_NEED_TLB_MISS_LOCK

#ifdef PMAP_MD_NEED_TLB_MISS_LOCK
#define	pmap_tlb_miss_lock_init()	__nothing /* MD code deals with this */
#define	pmap_tlb_miss_lock_enter()	pmap_md_tlb_miss_lock_enter()
#define	pmap_tlb_miss_lock_exit()	pmap_md_tlb_miss_lock_exit()
#else
kmutex_t pmap_tlb_miss_lock		__cacheline_aligned;

static void
pmap_tlb_miss_lock_init(void)
{
	mutex_init(&pmap_tlb_miss_lock, MUTEX_SPIN, IPL_HIGH);
}

static inline void
pmap_tlb_miss_lock_enter(void)
{
	mutex_spin_enter(&pmap_tlb_miss_lock);
}

static inline void
pmap_tlb_miss_lock_exit(void)
{
	mutex_spin_exit(&pmap_tlb_miss_lock);
}
#endif /* PMAP_MD_NEED_TLB_MISS_LOCK */

#else

#define	pmap_tlb_miss_lock_init()	__nothing
#define	pmap_tlb_miss_lock_enter()	__nothing
#define	pmap_tlb_miss_lock_exit()	__nothing

#endif /* PMAP_NEED_TLB_MISS_LOCK */
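/*
 * pmap_tlb_miss_lock_enter()/_exit() bracket updates of PTE and TLB
 * state that an MD TLB-miss handler may inspect concurrently; when no
 * such handler exists, they compile away to nothing.
 */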

#ifndef MULTIPROCESSOR
kmutex_t pmap_pvlist_mutex	__cacheline_aligned;
#endif

/*
 * Debug functions.
 */

#ifdef DEBUG

bool pmap_stealdebug = false;

#define DPRINTF(...)							     \
    do { if (pmap_stealdebug) { printf(__VA_ARGS__); } } while (false)

static inline void
pmap_asid_check(pmap_t pm, const char *func)
{
	if (!PMAP_IS_ACTIVE(pm))
		return;

	struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(curcpu()));
	tlb_asid_t asid = tlb_get_asid();
	if (asid != pai->pai_asid)
		panic("%s: inconsistency for active TLB update: %u <-> %u",
		    func, asid, pai->pai_asid);
}
#else

#define DPRINTF(...) __nothing

#endif

static void
pmap_addr_range_check(pmap_t pmap, vaddr_t sva, vaddr_t eva, const char *func)
{
#ifdef DEBUG
	if (pmap == pmap_kernel()) {
		if (sva < VM_MIN_KERNEL_ADDRESS)
			panic("%s: kva %#"PRIxVADDR" not in range",
			    func, sva);
		if (eva >= pmap_limits.virtual_end)
			panic("%s: kva %#"PRIxVADDR" not in range",
			    func, eva);
	} else {
		if (eva > VM_MAXUSER_ADDRESS)
			panic("%s: uva %#"PRIxVADDR" not in range",
			    func, eva);
		pmap_asid_check(pmap, func);
	}
#endif
}

/*
 * Misc. functions.
 */

bool
pmap_page_clear_attributes(struct vm_page_md *mdpg, u_long clear_attributes)
{
	volatile u_long * const attrp = &mdpg->mdpg_attrs;

#ifdef MULTIPROCESSOR
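	/*
	 * CAS retry loop: reload the attribute word and retry the
	 * compare-and-swap until either no requested bits remain set
	 * (nothing to clear) or the swap succeeds.
	 */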
	for (;;) {
		u_long old_attr = *attrp;
		if ((old_attr & clear_attributes) == 0)
			return false;
		u_long new_attr = old_attr & ~clear_attributes;
		if (old_attr == atomic_cas_ulong(attrp, old_attr, new_attr))
			return true;
	}
#else
	u_long old_attr = *attrp;
	if ((old_attr & clear_attributes) == 0)
		return false;
	*attrp &= ~clear_attributes;
	return true;
#endif
}

void
pmap_page_set_attributes(struct vm_page_md *mdpg, u_long set_attributes)
{
#ifdef MULTIPROCESSOR
	atomic_or_ulong(&mdpg->mdpg_attrs, set_attributes);
#else
	mdpg->mdpg_attrs |= set_attributes;
#endif
}

static void
pmap_page_syncicache(struct vm_page *pg)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLED(pmaphist);
#ifndef MULTIPROCESSOR
	struct pmap * const curpmap = curlwp->l_proc->p_vmspace->vm_map.pmap;
#endif
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv = &mdpg->mdpg_first;
	kcpuset_t *onproc;
#ifdef MULTIPROCESSOR
	kcpuset_create(&onproc, true);
	KASSERT(onproc != NULL);
#else
	onproc = NULL;
#endif
	VM_PAGEMD_PVLIST_READLOCK(mdpg);
	pmap_pvlist_check(mdpg);

	UVMHIST_LOG(pmaphist, "pv %#jx pv_pmap %#jx", (uintptr_t)pv,
	    (uintptr_t)pv->pv_pmap, 0, 0);

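	/*
	 * Walk the PV list to work out which CPUs may hold stale icache
	 * data for this page: on MP, the union of the mapping pmaps'
	 * pm_onproc sets; on UP, only the currently active pmap matters.
	 */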
	if (pv->pv_pmap != NULL) {
		for (; pv != NULL; pv = pv->pv_next) {
#ifdef MULTIPROCESSOR
			UVMHIST_LOG(pmaphist, "pv %#jx pv_pmap %#jx",
			    (uintptr_t)pv, (uintptr_t)pv->pv_pmap, 0, 0);
			kcpuset_merge(onproc, pv->pv_pmap->pm_onproc);
			if (kcpuset_match(onproc, kcpuset_running)) {
				break;
			}
#else
			if (pv->pv_pmap == curpmap) {
				onproc = curcpu()->ci_kcpuset;
				break;
			}
#endif
		}
	}
	pmap_pvlist_check(mdpg);
	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	kpreempt_disable();
	pmap_md_page_syncicache(mdpg, onproc);
	kpreempt_enable();
#ifdef MULTIPROCESSOR
	kcpuset_destroy(onproc);
#endif
}

/*
 * Define the initial bounds of the kernel virtual address space.
 */
void
pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
{
	*vstartp = pmap_limits.virtual_start;
	*vendp = pmap_limits.virtual_end;
}

vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "maxkvaddr=%#jx (%#jx)", maxkvaddr,
	    pmap_curmaxkvaddr, 0, 0);

	vaddr_t virtual_end = pmap_curmaxkvaddr;
	maxkvaddr = pmap_round_seg(maxkvaddr) - 1;

	/*
	 * Don't exceed VM_MAX_KERNEL_ADDRESS!
	 */
	if (maxkvaddr == 0 || maxkvaddr > VM_MAX_KERNEL_ADDRESS)
		maxkvaddr = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Reserve PTEs for the new KVA space.
	 */
	for (; virtual_end < maxkvaddr; virtual_end += NBSEG) {
		pmap_pte_reserve(pmap_kernel(), virtual_end, 0);
	}

	kasan_shadow_map((void *)pmap_curmaxkvaddr,
	    (size_t)(virtual_end - pmap_curmaxkvaddr));

	/*
	 * Update new end.
	 */
	pmap_curmaxkvaddr = virtual_end;

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);

	return virtual_end;
}

/*
 * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
 * This function allows for early dynamic memory allocation until the virtual
 * memory system has been bootstrapped.  After that point, either kmem_alloc
 * or malloc should be used.  This function works by stealing pages from the
 * (to be) managed page pool, then implicitly mapping the pages (by using
 * their direct mapped addresses) and zeroing them.
 *
 * It may be used once the physical memory segments have been pre-loaded
 * into the vm_physmem[] array.  Early memory allocation MUST use this
 * interface!  This cannot be used after vm_page_startup(), and will
 * generate a panic if tried.
 *
 * Note that this memory will never be freed, and in essence it is wired
 * down.
 *
 * We must adjust *vstartp and/or *vendp iff we use address space
 * from the kernel virtual address range defined by pmap_virtual_space().
 */
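/*
 * Illustrative use only (this sketch is not part of the code): early MD
 * bootstrap code typically grabs a zeroed, wired region along these
 * lines, where "len", "vstart" and "vend" are hypothetical:
 *
 *	vaddr_t va = pmap_steal_memory(len, &vstart, &vend);
 */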
vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
	size_t npgs;
	paddr_t pa;
	vaddr_t va;

	uvm_physseg_t maybe_bank = UVM_PHYSSEG_TYPE_INVALID;

	size = round_page(size);
	npgs = atop(size);

	DPRINTF("%s: need %zu pages\n", __func__, npgs);

	for (uvm_physseg_t bank = uvm_physseg_get_first();
	     uvm_physseg_valid_p(bank);
	     bank = uvm_physseg_get_next(bank)) {

		if (uvm.page_init_done == true)
			panic("pmap_steal_memory: called _after_ bootstrap");

		DPRINTF("%s: seg %"PRIxPHYSSEG": %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
		    __func__, bank,
		    uvm_physseg_get_avail_start(bank), uvm_physseg_get_start(bank),
		    uvm_physseg_get_avail_end(bank), uvm_physseg_get_end(bank));

		if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank)
		    || uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) {
			DPRINTF("%s: seg %"PRIxPHYSSEG": bad start\n", __func__, bank);
			continue;
		}

		if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < npgs) {
			DPRINTF("%s: seg %"PRIxPHYSSEG": too small for %zu pages\n",
			    __func__, bank, npgs);
			continue;
		}

		if (!pmap_md_ok_to_steal_p(bank, npgs)) {
			continue;
		}

		/*
		 * Always try to allocate from the segment with the least
		 * amount of space left.
		 */
#define VM_PHYSMEM_SPACE(b)	((uvm_physseg_get_avail_end(b)) - (uvm_physseg_get_avail_start(b)))
		if (uvm_physseg_valid_p(maybe_bank) == false
		    || VM_PHYSMEM_SPACE(bank) < VM_PHYSMEM_SPACE(maybe_bank)) {
			maybe_bank = bank;
		}
	}

	if (uvm_physseg_valid_p(maybe_bank)) {
		const uvm_physseg_t bank = maybe_bank;

		/*
		 * There are enough pages here; steal them!
		 */
		pa = ptoa(uvm_physseg_get_start(bank));
		uvm_physseg_unplug(atop(pa), npgs);

		DPRINTF("%s: seg %"PRIxPHYSSEG": %zu pages stolen (%#"PRIxPADDR" left)\n",
		    __func__, bank, npgs, VM_PHYSMEM_SPACE(bank));

		va = pmap_md_map_poolpage(pa, size);
		memset((void *)va, 0, size);
		return va;
	}

	/*
	 * If we got here, there was no memory left.
	 */
	panic("pmap_steal_memory: no memory to steal %zu pages", npgs);
}

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	(Common routine called by machine-dependent bootstrap code.)
 */
void
pmap_bootstrap_common(void)
{
	UVMHIST_LINK_STATIC(pmapexechist);
	UVMHIST_LINK_STATIC(pmaphist);
	UVMHIST_LINK_STATIC(pmapxtabhist);

	static const struct uvm_pagerops pmap_pager = {
		/* nothing */
	};

	pmap_t pm = pmap_kernel();

	rw_init(&pm->pm_obj_lock);
	uvm_obj_init(&pm->pm_uobject, &pmap_pager, false, 1);
	uvm_obj_setlock(&pm->pm_uobject, &pm->pm_obj_lock);

	TAILQ_INIT(&pm->pm_ppg_list);

#if defined(PMAP_HWPAGEWALKER)
	TAILQ_INIT(&pm->pm_pdetab_list);
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	TAILQ_INIT(&pm->pm_segtab_list);
#endif

#if defined(EFI_RUNTIME)

	const pmap_t efipm = pmap_efirt();
	struct pmap_asid_info * const efipai = PMAP_PAI(efipm, cpu_tlb_info(ci));

	rw_init(&efipm->pm_obj_lock);
	uvm_obj_init(&efipm->pm_uobject, &pmap_pager, false, 1);
	uvm_obj_setlock(&efipm->pm_uobject, &efipm->pm_obj_lock);

	efipai->pai_asid = KERNEL_PID;

	TAILQ_INIT(&efipm->pm_ppg_list);

#if defined(PMAP_HWPAGEWALKER)
	TAILQ_INIT(&efipm->pm_pdetab_list);
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	TAILQ_INIT(&efipm->pm_segtab_list);
#endif

#endif

	/*
	 * Initialize the segtab lock.
	 */
	mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH);

	pmap_tlb_miss_lock_init();
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLED(pmaphist);

	/*
	 * Set a low water mark on the pv_entry pool, so that we are
	 * more likely to have these around even in extreme memory
	 * starvation.
	 */
	pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);

	/*
	 * Set the page colormask but allow pmap_md_init to override it.
	 */
	pmap_page_colormask = ptoa(uvmexp.colormask);

	pmap_md_init();

	/*
	 * Now it is safe to enable pv entry recording.
	 */
	pmap_initialized = true;
}

/*
 *	Create and return a physical map.
 *
 *	The returned map is an actual physical map and may be
 *	referenced by the hardware.  (The historical interface took a
 *	size argument and could create software-only maps; this
 *	implementation takes no size and always creates a hardware-
 *	capable map.)
 */
pmap_t
pmap_create(void)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLED(pmaphist);
	PMAP_COUNT(create);

	static const struct uvm_pagerops pmap_pager = {
		/* nothing */
	};

	pmap_t pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
	memset(pmap, 0, PMAP_SIZE);

	KASSERT(pmap->pm_pai[0].pai_link.le_prev == NULL);

	pmap->pm_refcnt = 1;
	pmap->pm_minaddr = VM_MIN_ADDRESS;
	pmap->pm_maxaddr = VM_MAXUSER_ADDRESS;

	rw_init(&pmap->pm_obj_lock);
	uvm_obj_init(&pmap->pm_uobject, &pmap_pager, false, 1);
	uvm_obj_setlock(&pmap->pm_uobject, &pmap->pm_obj_lock);

	TAILQ_INIT(&pmap->pm_ppg_list);
#if defined(PMAP_HWPAGEWALKER)
	TAILQ_INIT(&pmap->pm_pdetab_list);
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	TAILQ_INIT(&pmap->pm_segtab_list);
#endif

	pmap_segtab_init(pmap);

#ifdef MULTIPROCESSOR
	kcpuset_create(&pmap->pm_active, true);
	kcpuset_create(&pmap->pm_onproc, true);
	KASSERT(pmap->pm_active != NULL);
	KASSERT(pmap->pm_onproc != NULL);
#endif

	UVMHIST_LOG(pmaphist, " <-- done (pmap=%#jx)", (uintptr_t)pmap,
	    0, 0, 0);

	return pmap;
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
	UVMHIST_CALLARGS(pmapxtabhist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);

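	/*
	 * Release/acquire pairing: membar_release() orders this
	 * thread's prior stores before the reference drop, and
	 * membar_acquire() after the final decrement ensures the
	 * destruction below sees all such stores.
	 */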
	membar_release();
	if (atomic_dec_uint_nv(&pmap->pm_refcnt) > 0) {
		PMAP_COUNT(dereference);
		UVMHIST_LOG(pmaphist, " <-- done (deref)", 0, 0, 0, 0);
		UVMHIST_LOG(pmapxtabhist, " <-- done (deref)", 0, 0, 0, 0);
		return;
	}
	membar_acquire();

	PMAP_COUNT(destroy);
	KASSERT(pmap->pm_refcnt == 0);
	kpreempt_disable();
	pmap_tlb_miss_lock_enter();
	pmap_tlb_asid_release_all(pmap);
	pmap_tlb_miss_lock_exit();
	pmap_segtab_destroy(pmap, NULL, 0);

	KASSERT(TAILQ_EMPTY(&pmap->pm_ppg_list));

#ifdef _LP64
#if defined(PMAP_HWPAGEWALKER)
	KASSERT(TAILQ_EMPTY(&pmap->pm_pdetab_list));
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	KASSERT(TAILQ_EMPTY(&pmap->pm_segtab_list));
#endif
#endif
	KASSERT(pmap->pm_uobject.uo_npages == 0);

	uvm_obj_destroy(&pmap->pm_uobject, false);
	rw_destroy(&pmap->pm_obj_lock);

#ifdef MULTIPROCESSOR
	kcpuset_destroy(pmap->pm_active);
	kcpuset_destroy(pmap->pm_onproc);
	pmap->pm_active = NULL;
	pmap->pm_onproc = NULL;
#endif

	pool_put(&pmap_pmap_pool, pmap);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done (freed)", 0, 0, 0, 0);
	UVMHIST_LOG(pmapxtabhist, " <-- done (freed)", 0, 0, 0, 0);
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
	PMAP_COUNT(reference);

	if (pmap != NULL) {
		atomic_inc_uint(&pmap->pm_refcnt);
	}

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

/*
 *	Make a new pmap (vmspace) active for the given process.
 */
void
pmap_activate(struct lwp *l)
{
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
	    (uintptr_t)pmap, 0, 0);
	PMAP_COUNT(activate);

	kpreempt_disable();
	pmap_tlb_miss_lock_enter();
	pmap_tlb_asid_acquire(pmap, l);
	pmap_segtab_activate(pmap, l);
	pmap_tlb_miss_lock_exit();
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
	    l->l_lid, 0, 0);
}

/*
 * Remove this page from all physical maps in which it resides.
 * Reflects back modify bits to the pager.
 */
void
pmap_page_remove(struct vm_page_md *mdpg)
{
	kpreempt_disable();
	VM_PAGEMD_PVLIST_LOCK(mdpg);
	pmap_pvlist_check(mdpg);

	struct vm_page * const pg =
	    VM_PAGEMD_VMPAGE_P(mdpg) ? VM_MD_TO_PAGE(mdpg) : NULL;

	UVMHIST_FUNC(__func__);
	if (pg) {
		UVMHIST_CALLARGS(pmaphist, "mdpg %#jx pg %#jx (pa %#jx): "
		    "execpage cleared", (uintptr_t)mdpg, (uintptr_t)pg,
		    VM_PAGE_TO_PHYS(pg), 0);
	} else {
		UVMHIST_CALLARGS(pmaphist, "mdpg %#jx", (uintptr_t)mdpg, 0,
		    0, 0);
	}

#ifdef PMAP_VIRTUAL_CACHE_ALIASES
	pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE | VM_PAGEMD_UNCACHED);
#else
	pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
#endif
	PMAP_COUNT(exec_uncached_remove);

	pv_entry_t pv = &mdpg->mdpg_first;
	if (pv->pv_pmap == NULL) {
		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
		kpreempt_enable();
		UVMHIST_LOG(pmaphist, " <-- done (empty)", 0, 0, 0, 0);
		return;
	}

	pv_entry_t npv;
	pv_entry_t pvp = NULL;

	for (; pv != NULL; pv = npv) {
		npv = pv->pv_next;
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		if (PV_ISKENTER_P(pv)) {
			UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %#jx"
			    " skip", (uintptr_t)pv, (uintptr_t)pv->pv_pmap,
			    pv->pv_va, 0);

			KASSERT(pv->pv_pmap == pmap_kernel());

			/* Assume no more - it'll get fixed if there are */
			pv->pv_next = NULL;

			/*
			 * pvp is non-null when we already have a PV_KENTER
			 * pv in pvh_first; otherwise we haven't seen a
			 * PV_KENTER pv and we need to copy this one to
			 * pvh_first
			 */
			if (pvp) {
				/*
				 * The previous PV_KENTER pv needs to point to
				 * this PV_KENTER pv
				 */
				pvp->pv_next = pv;
			} else {
				pv_entry_t fpv = &mdpg->mdpg_first;
				*fpv = *pv;
				KASSERT(fpv->pv_pmap == pmap_kernel());
			}
			pvp = pv;
			continue;
		}
#endif
		const pmap_t pmap = pv->pv_pmap;
		vaddr_t va = trunc_page(pv->pv_va);
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
		    pmap_limits.virtual_end);
		pt_entry_t pte = *ptep;
		UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %#jx"
		    " pte %#jx", (uintptr_t)pv, (uintptr_t)pmap, va,
		    pte_value(pte));
		if (!pte_valid_p(pte))
			continue;
		const bool is_kernel_pmap_p = (pmap == pmap_kernel());
		if (is_kernel_pmap_p) {
			PMAP_COUNT(remove_kernel_pages);
		} else {
			PMAP_COUNT(remove_user_pages);
		}
		if (pte_wired_p(pte))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;

		pmap_tlb_miss_lock_enter();
		const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
		pte_set(ptep, npte);
		if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
			/*
			 * Flush the TLB for the given address.
			 */
			pmap_tlb_invalidate_addr(pmap, va);
		}
		pmap_tlb_miss_lock_exit();

		/*
		 * non-null means this is a non-pvh_first pv, so we should
		 * free it.
		 */
		if (pvp) {
			KASSERT(pvp->pv_pmap == pmap_kernel());
			KASSERT(pvp->pv_next == NULL);
			pmap_pv_free(pv);
		} else {
			pv->pv_pmap = NULL;
			pv->pv_next = NULL;
		}
	}

	pmap_pvlist_check(mdpg);
	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

#ifdef __HAVE_PMAP_PV_TRACK
/*
 * pmap_pv_protect: change protection of an unmanaged pv-tracked page from
 * all pmaps that map it
 */
void
pmap_pv_protect(paddr_t pa, vm_prot_t prot)
{

	/* the only case is remove at the moment */
	KASSERT(prot == VM_PROT_NONE);
	struct pmap_page *pp;

	pp = pmap_pv_tracked(pa);
	if (pp == NULL)
		panic("pmap_pv_protect: page not pv-tracked: 0x%"PRIxPADDR,
		    pa);

	struct vm_page_md *mdpg = PMAP_PAGE_TO_MD(pp);
	pmap_page_remove(mdpg);
}
#endif

/*
 *	Make a previously active pmap (vmspace) inactive.
 */
void
pmap_deactivate(struct lwp *l)
{
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
	    (uintptr_t)pmap, 0, 0);
	PMAP_COUNT(deactivate);

	kpreempt_disable();
	KASSERT(l == curlwp || l->l_cpu == curlwp->l_cpu);
	pmap_tlb_miss_lock_enter();
	pmap_tlb_asid_deactivate(pmap);
	pmap_segtab_deactivate(pmap);
	pmap_tlb_miss_lock_exit();
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
	    l->l_lid, 0, 0);
}

void
pmap_update(struct pmap *pmap)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
	PMAP_COUNT(update);

	kpreempt_disable();
#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
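	/*
	 * Drain deferred TLB shootdowns now: pmap_update() is the point
	 * by which all earlier pmap operations must be made visible.
	 */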
   1076   1.1  christos 	u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0);
   1077   1.1  christos 	if (pending && pmap_tlb_shootdown_bystanders(pmap))
   1078   1.1  christos 		PMAP_COUNT(shootdown_ipis);
   1079   1.1  christos #endif
   1080  1.46   thorpej 	pmap_tlb_miss_lock_enter();
   1081  1.11    nonaka #if defined(DEBUG) && !defined(MULTIPROCESSOR)
   1082   1.1  christos 	pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
   1083   1.1  christos #endif /* DEBUG */
   1084   1.1  christos 
   1085   1.1  christos 	/*
   1086   1.1  christos 	 * If pmap_remove_all was called, we deactivated ourselves and nuked
   1087   1.1  christos 	 * our ASID.  Now we have to reactivate ourselves.
   1088   1.1  christos 	 */
   1089   1.1  christos 	if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) {
   1090   1.1  christos 		pmap->pm_flags ^= PMAP_DEFERRED_ACTIVATE;
   1091   1.1  christos 		pmap_tlb_asid_acquire(pmap, curlwp);
   1092   1.1  christos 		pmap_segtab_activate(pmap, curlwp);
   1093   1.1  christos 	}
   1094  1.46   thorpej 	pmap_tlb_miss_lock_exit();
   1095   1.1  christos 	kpreempt_enable();
   1096   1.1  christos 
   1097  1.59     skrll 	UVMHIST_LOG(pmaphist, " <-- done (kernel=%jd)",
   1098  1.37  pgoyette 		    (pmap == pmap_kernel() ? 1 : 0), 0, 0, 0);
   1099   1.1  christos }
   1100   1.1  christos 
   1101   1.1  christos /*
   1102   1.1  christos  *	Remove the given range of addresses from the specified map.
   1103   1.1  christos  *
   1104   1.1  christos  *	It is assumed that the start and end are properly
   1105   1.1  christos  *	rounded to the page size.
   1106   1.1  christos  */
   1107   1.1  christos 
   1108   1.1  christos static bool
   1109   1.1  christos pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
   1110  1.66     skrll     uintptr_t flags)
   1111   1.1  christos {
   1112   1.1  christos 	const pt_entry_t npte = flags;
   1113   1.1  christos 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
   1114   1.1  christos 
   1115  1.49     skrll 	UVMHIST_FUNC(__func__);
   1116  1.59     skrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jd va=%#jx..%#jx)",
   1117  1.66     skrll 	    (uintptr_t)pmap, (is_kernel_pmap_p ? 1 : 0), sva, eva);
   1118  1.49     skrll 	UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)",
   1119  1.37  pgoyette 	    (uintptr_t)ptep, flags, 0, 0);
   1120   1.1  christos 
   1121   1.1  christos 	KASSERT(kpreempt_disabled());
   1122   1.1  christos 
   1123   1.1  christos 	for (; sva < eva; sva += NBPG, ptep++) {
   1124  1.15      matt 		const pt_entry_t pte = *ptep;
   1125  1.15      matt 		if (!pte_valid_p(pte))
   1126   1.1  christos 			continue;
   1127  1.15      matt 		if (is_kernel_pmap_p) {
   1128  1.15      matt 			PMAP_COUNT(remove_kernel_pages);
   1129  1.15      matt 		} else {
   1130   1.1  christos 			PMAP_COUNT(remove_user_pages);
   1131  1.15      matt 		}
   1132  1.15      matt 		if (pte_wired_p(pte))
   1133   1.1  christos 			pmap->pm_stats.wired_count--;
   1134   1.1  christos 		pmap->pm_stats.resident_count--;
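                         		/* If the page is managed, drop this mapping from its pv list. */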
   1135  1.15      matt 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
   1136   1.1  christos 		if (__predict_true(pg != NULL)) {
   1137  1.15      matt 			pmap_remove_pv(pmap, sva, pg, pte_modified_p(pte));
   1138   1.1  christos 		}
   1139  1.46   thorpej 		pmap_tlb_miss_lock_enter();
   1140  1.35     skrll 		pte_set(ptep, npte);
   1141  1.36     skrll 		if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
   1142  1.36     skrll 			/*
   1143  1.36     skrll 			 * Flush the TLB for the given address.
   1144  1.36     skrll 			 */
   1145  1.36     skrll 			pmap_tlb_invalidate_addr(pmap, sva);
   1146  1.36     skrll 		}
   1147  1.46   thorpej 		pmap_tlb_miss_lock_exit();
   1148   1.1  christos 	}
   1149  1.15      matt 
   1150  1.15      matt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1151  1.15      matt 
   1152   1.1  christos 	return false;
   1153   1.1  christos }
   1154   1.1  christos 
   1155   1.1  christos void
   1156   1.1  christos pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
   1157   1.1  christos {
   1158   1.1  christos 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
   1159   1.1  christos 	const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
   1160   1.1  christos 
   1161  1.49     skrll 	UVMHIST_FUNC(__func__);
   1162  1.49     skrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx)",
   1163  1.37  pgoyette 	    (uintptr_t)pmap, sva, eva, 0);
   1164   1.1  christos 
   1165  1.15      matt 	if (is_kernel_pmap_p) {
   1166   1.1  christos 		PMAP_COUNT(remove_kernel_calls);
   1167  1.15      matt 	} else {
   1168   1.1  christos 		PMAP_COUNT(remove_user_calls);
   1169   1.1  christos 	}
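                         	/* Reset any recorded repeated-fault state; these mappings go away. */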
   1170  1.15      matt #ifdef PMAP_FAULTINFO
   1171  1.15      matt 	curpcb->pcb_faultinfo.pfi_faultaddr = 0;
   1172  1.15      matt 	curpcb->pcb_faultinfo.pfi_repeats = 0;
   1173  1.60     skrll 	curpcb->pcb_faultinfo.pfi_faultptep = NULL;
   1174   1.1  christos #endif
   1175   1.1  christos 	kpreempt_disable();
   1176  1.15      matt 	pmap_addr_range_check(pmap, sva, eva, __func__);
   1177   1.1  christos 	pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte);
   1178   1.1  christos 	kpreempt_enable();
   1179   1.1  christos 
   1180  1.15      matt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1181   1.1  christos }
   1182   1.1  christos 
   1183   1.1  christos /*
   1184   1.1  christos  *	pmap_page_protect:
   1185   1.1  christos  *
   1186   1.1  christos  *	Lower the permission for all mappings to a given page.
   1187   1.1  christos  */
   1188   1.1  christos void
   1189   1.1  christos pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   1190   1.1  christos {
   1191   1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1192   1.1  christos 	pv_entry_t pv;
   1193   1.1  christos 	vaddr_t va;
   1194   1.1  christos 
   1195  1.49     skrll 	UVMHIST_FUNC(__func__);
   1196  1.49     skrll 	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx) prot=%#jx)",
   1197  1.37  pgoyette 	    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), prot, 0);
   1198   1.1  christos 	PMAP_COUNT(page_protect);
   1199   1.1  christos 
   1200   1.1  christos 	switch (prot) {
   1201  1.73     skrll 	case VM_PROT_READ | VM_PROT_WRITE:
   1202   1.1  christos 	case VM_PROT_ALL:
   1203   1.1  christos 		break;
   1204   1.1  christos 
   1205   1.1  christos 	/* copy_on_write */
   1206   1.1  christos 	case VM_PROT_READ:
   1207  1.73     skrll 	case VM_PROT_READ | VM_PROT_EXECUTE:
   1208   1.1  christos 		pv = &mdpg->mdpg_first;
   1209  1.15      matt 		kpreempt_disable();
   1210  1.15      matt 		VM_PAGEMD_PVLIST_READLOCK(mdpg);
   1211  1.15      matt 		pmap_pvlist_check(mdpg);
   1212   1.1  christos 		/*
    1213  1.33     skrll 		 * Loop over all current mappings, downgrading the
    1214  1.33     skrll 		 * protection of each as appropriate.
   1215   1.1  christos 		 */
   1216   1.1  christos 		if (pv->pv_pmap != NULL) {
   1217   1.1  christos 			while (pv != NULL) {
   1218  1.15      matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1219  1.42     skrll 				if (PV_ISKENTER_P(pv)) {
   1220  1.15      matt 					pv = pv->pv_next;
   1221  1.15      matt 					continue;
   1222  1.15      matt 				}
   1223  1.15      matt #endif
   1224   1.1  christos 				const pmap_t pmap = pv->pv_pmap;
   1225  1.15      matt 				va = trunc_page(pv->pv_va);
   1226  1.15      matt 				const uintptr_t gen =
   1227  1.15      matt 				    VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1228   1.1  christos 				pmap_protect(pmap, va, va + PAGE_SIZE, prot);
   1229   1.1  christos 				KASSERT(pv->pv_pmap == pmap);
   1230   1.1  christos 				pmap_update(pmap);
   1231  1.15      matt 				if (gen != VM_PAGEMD_PVLIST_READLOCK(mdpg)) {
   1232   1.1  christos 					pv = &mdpg->mdpg_first;
   1233   1.1  christos 				} else {
   1234   1.1  christos 					pv = pv->pv_next;
   1235   1.1  christos 				}
   1236  1.15      matt 				pmap_pvlist_check(mdpg);
   1237   1.1  christos 			}
   1238   1.1  christos 		}
   1239  1.15      matt 		pmap_pvlist_check(mdpg);
   1240   1.1  christos 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1241  1.15      matt 		kpreempt_enable();
   1242   1.1  christos 		break;
   1243   1.1  christos 
   1244   1.1  christos 	/* remove_all */
   1245   1.1  christos 	default:
   1246  1.58     skrll 		pmap_page_remove(mdpg);
   1247   1.1  christos 	}
   1248   1.1  christos 
   1249  1.15      matt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1250   1.1  christos }
   1251   1.1  christos 
   1252   1.1  christos static bool
   1253   1.1  christos pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
   1254   1.1  christos 	uintptr_t flags)
   1255   1.1  christos {
   1256   1.1  christos 	const vm_prot_t prot = (flags & VM_PROT_ALL);
   1257   1.1  christos 
   1258  1.49     skrll 	UVMHIST_FUNC(__func__);
   1259  1.59     skrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jd va=%#jx..%#jx)",
   1260  1.40  pgoyette 	    (uintptr_t)pmap, (pmap == pmap_kernel() ? 1 : 0), sva, eva);
   1261  1.37  pgoyette 	UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)",
   1262  1.37  pgoyette 	    (uintptr_t)ptep, flags, 0, 0);
   1263   1.1  christos 
   1264   1.1  christos 	KASSERT(kpreempt_disabled());
   1265   1.1  christos 	/*
   1266   1.1  christos 	 * Change protection on every valid mapping within this segment.
   1267   1.1  christos 	 */
   1268   1.1  christos 	for (; sva < eva; sva += NBPG, ptep++) {
   1269  1.15      matt 		pt_entry_t pte = *ptep;
   1270  1.15      matt 		if (!pte_valid_p(pte))
   1271   1.1  christos 			continue;
   1272  1.15      matt 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
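                         		/*
                         		 * If a modified page is executable, sync the I-cache
                         		 * so instruction fetches see the latest stores before
                         		 * the mapping is downgraded.
                         		 */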
   1273  1.15      matt 		if (pg != NULL && pte_modified_p(pte)) {
   1274   1.1  christos 			struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1275   1.1  christos 			if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
   1276  1.44     skrll 				KASSERT(!VM_PAGEMD_PVLIST_EMPTY_P(mdpg));
   1277  1.15      matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1278  1.15      matt 				if (VM_PAGEMD_CACHED_P(mdpg)) {
   1279  1.15      matt #endif
   1280   1.1  christos 					UVMHIST_LOG(pmapexechist,
   1281  1.37  pgoyette 					    "pg %#jx (pa %#jx): "
   1282  1.28       mrg 					    "syncicached performed",
   1283  1.37  pgoyette 					    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg),
   1284  1.37  pgoyette 					    0, 0);
   1285   1.1  christos 					pmap_page_syncicache(pg);
   1286   1.1  christos 					PMAP_COUNT(exec_synced_protect);
   1287  1.15      matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1288   1.1  christos 				}
   1289  1.15      matt #endif
   1290   1.1  christos 			}
   1291   1.1  christos 		}
   1292  1.15      matt 		pte = pte_prot_downgrade(pte, prot);
   1293  1.15      matt 		if (*ptep != pte) {
   1294  1.46   thorpej 			pmap_tlb_miss_lock_enter();
   1295  1.35     skrll 			pte_set(ptep, pte);
   1296   1.1  christos 			/*
   1297   1.1  christos 			 * Update the TLB if needed.
   1298   1.1  christos 			 */
   1299  1.15      matt 			pmap_tlb_update_addr(pmap, sva, pte, PMAP_TLB_NEED_IPI);
   1300  1.46   thorpej 			pmap_tlb_miss_lock_exit();
   1301   1.1  christos 		}
   1302   1.1  christos 	}
   1303  1.15      matt 
   1304  1.15      matt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1305  1.15      matt 
   1306   1.1  christos 	return false;
   1307   1.1  christos }
   1308   1.1  christos 
   1309   1.1  christos /*
   1310   1.1  christos  *	Set the physical protection on the
   1311   1.1  christos  *	specified range of this map as requested.
   1312   1.1  christos  */
   1313   1.1  christos void
   1314   1.1  christos pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
   1315   1.1  christos {
   1316  1.49     skrll 	UVMHIST_FUNC(__func__);
   1317  1.49     skrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx, prot=%ju)",
   1318  1.37  pgoyette 	    (uintptr_t)pmap, sva, eva, prot);
   1319   1.1  christos 	PMAP_COUNT(protect);
   1320   1.1  christos 
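                         	/* Removing read access removes the mappings entirely. */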
   1321   1.1  christos 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
   1322   1.1  christos 		pmap_remove(pmap, sva, eva);
   1323  1.15      matt 		UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1324   1.1  christos 		return;
   1325   1.1  christos 	}
   1326   1.1  christos 
   1327   1.1  christos 	/*
   1328   1.1  christos 	 * Change protection on every valid mapping within this segment.
   1329   1.1  christos 	 */
   1330   1.1  christos 	kpreempt_disable();
   1331  1.15      matt 	pmap_addr_range_check(pmap, sva, eva, __func__);
   1332   1.1  christos 	pmap_pte_process(pmap, sva, eva, pmap_pte_protect, prot);
   1333   1.1  christos 	kpreempt_enable();
   1334   1.1  christos 
   1335  1.15      matt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1336   1.1  christos }
   1337   1.1  christos 
   1338  1.15      matt #if defined(PMAP_VIRTUAL_CACHE_ALIASES) && !defined(PMAP_NO_PV_UNCACHED)
   1339   1.1  christos /*
   1340   1.1  christos  *	pmap_page_cache:
   1341   1.1  christos  *
   1342   1.1  christos  *	Change all mappings of a managed page to cached/uncached.
   1343   1.1  christos  */
   1344  1.15      matt void
   1345  1.58     skrll pmap_page_cache(struct vm_page_md *mdpg, bool cached)
   1346   1.1  christos {
   1347  1.58     skrll #ifdef UVMHIST
   1348  1.58     skrll 	const bool vmpage_p = VM_PAGEMD_VMPAGE_P(mdpg);
   1349  1.58     skrll 	struct vm_page * const pg = vmpage_p ? VM_MD_TO_PAGE(mdpg) : NULL;
   1350  1.58     skrll #endif
   1351  1.15      matt 
   1352  1.49     skrll 	UVMHIST_FUNC(__func__);
   1353  1.58     skrll 	UVMHIST_CALLARGS(pmaphist, "(mdpg=%#jx (pa %#jx) cached=%jd vmpage %jd)",
   1354  1.58     skrll 	    (uintptr_t)mdpg, pg ? VM_PAGE_TO_PHYS(pg) : 0, cached, vmpage_p);
   1355  1.15      matt 
   1356   1.1  christos 	KASSERT(kpreempt_disabled());
   1357  1.15      matt 	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
   1358   1.1  christos 
   1359   1.1  christos 	if (cached) {
   1360   1.1  christos 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
   1361   1.1  christos 		PMAP_COUNT(page_cache_restorations);
   1362   1.1  christos 	} else {
   1363   1.1  christos 		pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED);
   1364   1.1  christos 		PMAP_COUNT(page_cache_evictions);
   1365   1.1  christos 	}
   1366   1.1  christos 
   1367  1.15      matt 	for (pv_entry_t pv = &mdpg->mdpg_first; pv != NULL; pv = pv->pv_next) {
   1368   1.1  christos 		pmap_t pmap = pv->pv_pmap;
   1369  1.15      matt 		vaddr_t va = trunc_page(pv->pv_va);
   1370   1.1  christos 
   1371   1.1  christos 		KASSERT(pmap != NULL);
   1372   1.1  christos 		KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
   1373   1.1  christos 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1374   1.1  christos 		if (ptep == NULL)
   1375   1.1  christos 			continue;
   1376  1.15      matt 		pt_entry_t pte = *ptep;
   1377  1.15      matt 		if (pte_valid_p(pte)) {
   1378  1.15      matt 			pte = pte_cached_change(pte, cached);
   1379  1.46   thorpej 			pmap_tlb_miss_lock_enter();
   1380  1.35     skrll 			pte_set(ptep, pte);
   1381  1.15      matt 			pmap_tlb_update_addr(pmap, va, pte, PMAP_TLB_NEED_IPI);
   1382  1.46   thorpej 			pmap_tlb_miss_lock_exit();
   1383   1.1  christos 		}
   1384   1.1  christos 	}
   1385  1.15      matt 
   1386  1.15      matt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1387   1.1  christos }
   1388  1.15      matt #endif	/* PMAP_VIRTUAL_CACHE_ALIASES && !PMAP_NO_PV_UNCACHED */
   1389   1.1  christos 
   1390   1.1  christos /*
   1391   1.1  christos  *	Insert the given physical page (p) at
   1392   1.1  christos  *	the specified virtual address (v) in the
   1393   1.1  christos  *	target physical map with the protection requested.
   1394   1.1  christos  *
   1395   1.1  christos  *	If specified, the page will be wired down, meaning
    1396   1.1  christos  *	that the related pte cannot be reclaimed.
   1397   1.1  christos  *
   1398   1.1  christos  *	NB:  This is the only routine which MAY NOT lazy-evaluate
   1399   1.1  christos  *	or lose information.  That is, this routine must actually
   1400   1.1  christos  *	insert this page into the given map NOW.
   1401   1.1  christos  */
   1402   1.1  christos int
   1403   1.1  christos pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1404   1.1  christos {
   1405   1.1  christos 	const bool wired = (flags & PMAP_WIRED) != 0;
   1406   1.1  christos 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
   1407  1.72     skrll #if defined(EFI_RUNTIME)
   1408  1.72     skrll 	const bool is_efirt_pmap_p = (pmap == pmap_efirt());
   1409  1.72     skrll #else
   1410  1.72     skrll 	const bool is_efirt_pmap_p = false;
   1411  1.72     skrll #endif
   1412  1.15      matt 	u_int update_flags = (flags & VM_PROT_ALL) != 0 ? PMAP_TLB_INSERT : 0;
   1413   1.1  christos #ifdef UVMHIST
   1414  1.15      matt 	struct kern_history * const histp =
   1415   1.1  christos 	    ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist);
   1416   1.1  christos #endif
   1417   1.1  christos 
   1418  1.49     skrll 	UVMHIST_FUNC(__func__);
   1419  1.49     skrll 	UVMHIST_CALLARGS(*histp, "(pmap=%#jx, va=%#jx, pa=%#jx",
   1420  1.37  pgoyette 	    (uintptr_t)pmap, va, pa, 0);
   1421  1.37  pgoyette 	UVMHIST_LOG(*histp, "prot=%#jx flags=%#jx)", prot, flags, 0, 0);
   1422   1.1  christos 
   1423   1.1  christos 	const bool good_color = PMAP_PAGE_COLOROK_P(pa, va);
   1424   1.1  christos 	if (is_kernel_pmap_p) {
   1425   1.1  christos 		PMAP_COUNT(kernel_mappings);
   1426   1.1  christos 		if (!good_color)
   1427   1.1  christos 			PMAP_COUNT(kernel_mappings_bad);
   1428   1.1  christos 	} else {
   1429   1.1  christos 		PMAP_COUNT(user_mappings);
   1430   1.1  christos 		if (!good_color)
   1431   1.1  christos 			PMAP_COUNT(user_mappings_bad);
   1432   1.1  christos 	}
   1433  1.15      matt 	pmap_addr_range_check(pmap, va, va, __func__);
   1434   1.1  christos 
   1435  1.15      matt 	KASSERTMSG(prot & VM_PROT_READ, "no READ (%#x) in prot %#x",
   1436  1.15      matt 	    VM_PROT_READ, prot);
   1437   1.1  christos 
   1438   1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1439  1.15      matt 	struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
   1440   1.1  christos 
   1441  1.58     skrll 	struct vm_page_md *mdpp = NULL;
   1442  1.58     skrll #ifdef __HAVE_PMAP_PV_TRACK
   1443  1.58     skrll 	struct pmap_page *pp = pmap_pv_tracked(pa);
   1444  1.58     skrll 	mdpp = pp ? PMAP_PAGE_TO_MD(pp) : NULL;
   1445  1.58     skrll #endif
   1446  1.58     skrll 
   1447  1.58     skrll 	if (mdpg) {
   1448   1.1  christos 		/* Set page referenced/modified status based on flags */
   1449  1.15      matt 		if (flags & VM_PROT_WRITE) {
   1450  1.73     skrll 			pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED | VM_PAGEMD_REFERENCED);
   1451  1.15      matt 		} else if (flags & VM_PROT_ALL) {
   1452   1.1  christos 			pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
   1453  1.15      matt 		}
   1454   1.1  christos 
   1455  1.15      matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1456  1.15      matt 		if (!VM_PAGEMD_CACHED_P(mdpg)) {
   1457   1.1  christos 			flags |= PMAP_NOCACHE;
   1458  1.15      matt 			PMAP_COUNT(uncached_mappings);
   1459  1.15      matt 		}
   1460   1.1  christos #endif
   1461   1.1  christos 
   1462   1.1  christos 		PMAP_COUNT(managed_mappings);
   1463  1.58     skrll 	} else if (mdpp) {
   1464  1.58     skrll #ifdef __HAVE_PMAP_PV_TRACK
   1465  1.58     skrll 		pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
   1466  1.58     skrll 
   1467  1.58     skrll 		PMAP_COUNT(pvtracked_mappings);
   1468  1.58     skrll #endif
   1469  1.72     skrll 	} else if (is_efirt_pmap_p) {
   1470  1.72     skrll 		PMAP_COUNT(efirt_mappings);
   1471   1.1  christos 	} else {
   1472   1.1  christos 		/*
   1473   1.1  christos 		 * Assumption: if it is not part of our managed memory
   1474   1.1  christos 		 * then it must be device memory which may be volatile.
   1475   1.1  christos 		 */
   1476  1.15      matt 		if ((flags & PMAP_CACHE_MASK) == 0)
   1477  1.15      matt 			flags |= PMAP_NOCACHE;
   1478   1.1  christos 		PMAP_COUNT(unmanaged_mappings);
   1479   1.1  christos 	}
   1480   1.1  christos 
   1481  1.72     skrll 	KASSERTMSG(mdpg == NULL || mdpp == NULL || is_efirt_pmap_p,
   1482  1.72     skrll 	    "mdpg %p mdpp %p efirt %s", mdpg, mdpp,
   1483  1.72     skrll 	    is_efirt_pmap_p ? "true" : "false");
   1484  1.58     skrll 
   1485  1.58     skrll 	struct vm_page_md *md = (mdpg != NULL) ? mdpg : mdpp;
   1486  1.72     skrll 	pt_entry_t npte = is_efirt_pmap_p ?
   1487  1.72     skrll 	    pte_make_enter_efirt(pa, prot, flags) :
   1488  1.72     skrll 	    pte_make_enter(pa, md, prot, flags, is_kernel_pmap_p);
   1489   1.1  christos 
   1490   1.1  christos 	kpreempt_disable();
   1491  1.15      matt 
   1492   1.1  christos 	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags);
   1493   1.1  christos 	if (__predict_false(ptep == NULL)) {
   1494   1.1  christos 		kpreempt_enable();
   1495  1.15      matt 		UVMHIST_LOG(*histp, " <-- ENOMEM", 0, 0, 0, 0);
   1496   1.1  christos 		return ENOMEM;
   1497   1.1  christos 	}
   1498  1.15      matt 	const pt_entry_t opte = *ptep;
   1499  1.24     skrll 	const bool resident = pte_valid_p(opte);
   1500  1.24     skrll 	bool remap = false;
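                         	/*
                         	 * If va was already mapped to a different pa, remove the
                         	 * old mapping first; only user pmaps may remap like this.
                         	 */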
   1501  1.24     skrll 	if (resident) {
   1502  1.24     skrll 		if (pte_to_paddr(opte) != pa) {
   1503  1.24     skrll 			KASSERT(!is_kernel_pmap_p);
   1504  1.67     skrll 			const pt_entry_t rpte = pte_nv_entry(false);
   1505  1.24     skrll 
   1506  1.24     skrll 			pmap_addr_range_check(pmap, va, va + NBPG, __func__);
   1507  1.24     skrll 			pmap_pte_process(pmap, va, va + NBPG, pmap_pte_remove,
   1508  1.24     skrll 			    rpte);
   1509  1.24     skrll 			PMAP_COUNT(user_mappings_changed);
   1510  1.24     skrll 			remap = true;
   1511  1.24     skrll 		}
   1512  1.24     skrll 		update_flags |= PMAP_TLB_NEED_IPI;
   1513  1.24     skrll 	}
   1514  1.24     skrll 
   1515  1.24     skrll 	if (!resident || remap) {
   1516  1.24     skrll 		pmap->pm_stats.resident_count++;
   1517  1.24     skrll 	}
   1518   1.1  christos 
    1519   1.1  christos 	/* Done after the code above that may sleep or return early. */
   1520  1.58     skrll 	if (md)
   1521  1.58     skrll 		pmap_enter_pv(pmap, va, pa, md, &npte, 0);
   1522   1.1  christos 
   1523   1.1  christos 	/*
   1524   1.1  christos 	 * Now validate mapping with desired protection/wiring.
   1525   1.1  christos 	 */
   1526   1.1  christos 	if (wired) {
   1527   1.1  christos 		pmap->pm_stats.wired_count++;
   1528   1.1  christos 		npte = pte_wire_entry(npte);
   1529   1.1  christos 	}
   1530   1.1  christos 
   1531  1.37  pgoyette 	UVMHIST_LOG(*histp, "new pte %#jx (pa %#jx)",
   1532  1.15      matt 	    pte_value(npte), pa, 0, 0);
   1533   1.1  christos 
   1534   1.1  christos 	KASSERT(pte_valid_p(npte));
   1535  1.15      matt 
   1536  1.46   thorpej 	pmap_tlb_miss_lock_enter();
   1537  1.35     skrll 	pte_set(ptep, npte);
   1538  1.15      matt 	pmap_tlb_update_addr(pmap, va, npte, update_flags);
   1539  1.46   thorpej 	pmap_tlb_miss_lock_exit();
   1540   1.1  christos 	kpreempt_enable();
   1541   1.1  christos 
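                         	/*
                         	 * For executable mappings, synchronize the I-cache, either
                         	 * immediately or deferred until first use, as the PTE allows.
                         	 */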
   1542   1.1  christos 	if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) {
   1543   1.1  christos 		KASSERT(mdpg != NULL);
   1544   1.1  christos 		PMAP_COUNT(exec_mappings);
   1545   1.1  christos 		if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) {
   1546   1.1  christos 			if (!pte_deferred_exec_p(npte)) {
   1547  1.37  pgoyette 				UVMHIST_LOG(*histp, "va=%#jx pg %#jx: "
   1548  1.37  pgoyette 				    "immediate syncicache",
   1549  1.37  pgoyette 				    va, (uintptr_t)pg, 0, 0);
   1550   1.1  christos 				pmap_page_syncicache(pg);
   1551   1.1  christos 				pmap_page_set_attributes(mdpg,
   1552   1.1  christos 				    VM_PAGEMD_EXECPAGE);
   1553   1.1  christos 				PMAP_COUNT(exec_synced_mappings);
   1554   1.1  christos 			} else {
   1555  1.37  pgoyette 				UVMHIST_LOG(*histp, "va=%#jx pg %#jx: defer "
   1556  1.37  pgoyette 				    "syncicache: pte %#jx",
   1557  1.37  pgoyette 				    va, (uintptr_t)pg, npte, 0);
   1558   1.1  christos 			}
   1559   1.1  christos 		} else {
   1560   1.1  christos 			UVMHIST_LOG(*histp,
   1561  1.37  pgoyette 			    "va=%#jx pg %#jx: no syncicache cached %jd",
   1562  1.37  pgoyette 			    va, (uintptr_t)pg, pte_cached_p(npte), 0);
   1563   1.1  christos 		}
   1564   1.1  christos 	} else if (pg != NULL && (prot & VM_PROT_EXECUTE)) {
   1565   1.1  christos 		KASSERT(mdpg != NULL);
   1566   1.1  christos 		KASSERT(prot & VM_PROT_WRITE);
   1567   1.1  christos 		PMAP_COUNT(exec_mappings);
   1568   1.1  christos 		pmap_page_syncicache(pg);
   1569   1.1  christos 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1570  1.15      matt 		UVMHIST_LOG(*histp,
   1571  1.37  pgoyette 		    "va=%#jx pg %#jx: immediate syncicache (writeable)",
   1572  1.37  pgoyette 		    va, (uintptr_t)pg, 0, 0);
   1573   1.1  christos 	}
   1574   1.1  christos 
   1575  1.15      matt 	UVMHIST_LOG(*histp, " <-- 0 (OK)", 0, 0, 0, 0);
   1576   1.1  christos 	return 0;
   1577   1.1  christos }
   1578   1.1  christos 
   1579   1.1  christos void
   1580   1.1  christos pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1581   1.1  christos {
   1582  1.15      matt 	pmap_t pmap = pmap_kernel();
   1583   1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1584  1.15      matt 	struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
   1585   1.1  christos 
   1586  1.49     skrll 	UVMHIST_FUNC(__func__);
   1587  1.49     skrll 	UVMHIST_CALLARGS(pmaphist, "(va=%#jx pa=%#jx prot=%ju, flags=%#jx)",
   1588  1.15      matt 	    va, pa, prot, flags);
   1589   1.1  christos 	PMAP_COUNT(kenter_pa);
   1590   1.1  christos 
   1591  1.15      matt 	if (mdpg == NULL) {
   1592   1.1  christos 		PMAP_COUNT(kenter_pa_unmanaged);
   1593  1.15      matt 		if ((flags & PMAP_CACHE_MASK) == 0)
   1594  1.15      matt 			flags |= PMAP_NOCACHE;
   1595   1.1  christos 	} else {
   1596  1.15      matt 		if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va))
   1597  1.15      matt 			PMAP_COUNT(kenter_pa_bad);
   1598   1.1  christos 	}
   1599   1.1  christos 
   1600  1.15      matt 	pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags);
   1601   1.1  christos 	kpreempt_disable();
   1602  1.69     skrll 	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, 0);
   1603  1.69     skrll 
   1604  1.15      matt 	KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
   1605  1.15      matt 	    pmap_limits.virtual_end);
   1606   1.1  christos 	KASSERT(!pte_valid_p(*ptep));
   1607  1.15      matt 
   1608  1.15      matt 	/*
    1609  1.15      matt 	 * No need to track non-managed pages or PMAP_KMPAGE pages for aliases.
   1610  1.15      matt 	 */
   1611  1.15      matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1612  1.20      matt 	if (pg != NULL && (flags & PMAP_KMPAGE) == 0
   1613  1.20      matt 	    && pmap_md_virtual_cache_aliasing_p()) {
   1614  1.58     skrll 		pmap_enter_pv(pmap, va, pa, mdpg, &npte, PV_KENTER);
   1615  1.15      matt 	}
   1616  1.15      matt #endif
   1617  1.15      matt 
   1618   1.1  christos 	/*
   1619   1.1  christos 	 * We have the option to force this mapping into the TLB but we
   1620   1.1  christos 	 * don't.  Instead let the next reference to the page do it.
   1621   1.1  christos 	 */
   1622  1.46   thorpej 	pmap_tlb_miss_lock_enter();
   1623  1.35     skrll 	pte_set(ptep, npte);
   1624   1.1  christos 	pmap_tlb_update_addr(pmap_kernel(), va, npte, 0);
   1625  1.46   thorpej 	pmap_tlb_miss_lock_exit();
   1626   1.1  christos 	kpreempt_enable();
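                         	/* (DEBUG) Verify the mapping reads back the same contents as pa. */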
   1627   1.1  christos #if DEBUG > 1
   1628   1.1  christos 	for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) {
   1629   1.1  christos 		if (((long *)va)[i] != ((long *)pa)[i])
   1630   1.1  christos 			panic("%s: contents (%lx) of va %#"PRIxVADDR
   1631   1.1  christos 			    " != contents (%lx) of pa %#"PRIxPADDR, __func__,
   1632   1.1  christos 			    ((long *)va)[i], va, ((long *)pa)[i], pa);
   1633   1.1  christos 	}
   1634   1.1  christos #endif
   1635  1.15      matt 
   1636  1.37  pgoyette 	UVMHIST_LOG(pmaphist, " <-- done (ptep=%#jx)", (uintptr_t)ptep, 0, 0,
   1637  1.37  pgoyette 	    0);
   1638   1.1  christos }
   1639   1.1  christos 
   1640  1.15      matt /*
   1641  1.15      matt  *	Remove the given range of addresses from the kernel map.
   1642  1.15      matt  *
   1643  1.15      matt  *	It is assumed that the start and end are properly
   1644  1.15      matt  *	rounded to the page size.
   1645  1.15      matt  */
   1646  1.15      matt 
   1647   1.1  christos static bool
   1648   1.1  christos pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
   1649   1.1  christos 	uintptr_t flags)
   1650   1.1  christos {
   1651  1.15      matt 	const pt_entry_t new_pte = pte_nv_entry(true);
   1652  1.15      matt 
   1653  1.49     skrll 	UVMHIST_FUNC(__func__);
   1654  1.49     skrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, sva=%#jx eva=%#jx ptep=%#jx)",
   1655  1.37  pgoyette 	    (uintptr_t)pmap, sva, eva, (uintptr_t)ptep);
   1656   1.1  christos 
   1657   1.1  christos 	KASSERT(kpreempt_disabled());
   1658   1.1  christos 
   1659   1.1  christos 	for (; sva < eva; sva += NBPG, ptep++) {
   1660  1.15      matt 		pt_entry_t pte = *ptep;
   1661  1.15      matt 		if (!pte_valid_p(pte))
   1662   1.1  christos 			continue;
   1663   1.1  christos 
   1664   1.1  christos 		PMAP_COUNT(kremove_pages);
   1665  1.21       mrg #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1666  1.15      matt 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
   1667  1.20      matt 		if (pg != NULL && pmap_md_virtual_cache_aliasing_p()) {
   1668  1.15      matt 			pmap_remove_pv(pmap, sva, pg, !pte_readonly_p(pte));
   1669  1.15      matt 		}
   1670  1.20      matt #endif
   1671   1.1  christos 
   1672  1.46   thorpej 		pmap_tlb_miss_lock_enter();
   1673  1.35     skrll 		pte_set(ptep, new_pte);
   1674  1.15      matt 		pmap_tlb_invalidate_addr(pmap, sva);
   1675  1.46   thorpej 		pmap_tlb_miss_lock_exit();
   1676   1.1  christos 	}
   1677   1.1  christos 
   1678  1.15      matt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1679  1.15      matt 
   1680   1.1  christos 	return false;
   1681   1.1  christos }
   1682   1.1  christos 
   1683   1.1  christos void
   1684   1.1  christos pmap_kremove(vaddr_t va, vsize_t len)
   1685   1.1  christos {
   1686   1.1  christos 	const vaddr_t sva = trunc_page(va);
   1687   1.1  christos 	const vaddr_t eva = round_page(va + len);
   1688   1.1  christos 
   1689  1.49     skrll 	UVMHIST_FUNC(__func__);
   1690  1.49     skrll 	UVMHIST_CALLARGS(pmaphist, "(va=%#jx len=%#jx)", va, len, 0, 0);
   1691   1.1  christos 
   1692   1.1  christos 	kpreempt_disable();
   1693   1.1  christos 	pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0);
   1694   1.1  christos 	kpreempt_enable();
   1695   1.1  christos 
   1696  1.15      matt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1697   1.1  christos }
   1698   1.1  christos 
   1699  1.48        ad bool
   1700   1.1  christos pmap_remove_all(struct pmap *pmap)
   1701   1.1  christos {
   1702  1.49     skrll 	UVMHIST_FUNC(__func__);
   1703  1.49     skrll 	UVMHIST_CALLARGS(pmaphist, "(pm=%#jx)", (uintptr_t)pmap, 0, 0, 0);
   1704  1.15      matt 
   1705   1.1  christos 	KASSERT(pmap != pmap_kernel());
   1706   1.1  christos 
   1707   1.1  christos 	kpreempt_disable();
   1708   1.1  christos 	/*
    1709   1.1  christos 	 * Free all of our ASIDs, which means we can skip doing all the
   1710   1.1  christos 	 * tlb_invalidate_addrs().
   1711   1.1  christos 	 */
   1712  1.46   thorpej 	pmap_tlb_miss_lock_enter();
   1713  1.15      matt #ifdef MULTIPROCESSOR
   1714  1.15      matt 	// This should be the last CPU with this pmap onproc
   1715  1.15      matt 	KASSERT(!kcpuset_isotherset(pmap->pm_onproc, cpu_index(curcpu())));
   1716  1.15      matt 	if (kcpuset_isset(pmap->pm_onproc, cpu_index(curcpu())))
   1717  1.15      matt #endif
   1718  1.15      matt 		pmap_tlb_asid_deactivate(pmap);
   1719  1.15      matt #ifdef MULTIPROCESSOR
   1720  1.15      matt 	KASSERT(kcpuset_iszero(pmap->pm_onproc));
   1721  1.15      matt #endif
   1722   1.1  christos 	pmap_tlb_asid_release_all(pmap);
   1723  1.46   thorpej 	pmap_tlb_miss_lock_exit();
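                         	/* Defer reactivation (new ASID, segtab) to the next pmap_update(). */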
   1724   1.1  christos 	pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE;
   1725   1.1  christos 
   1726  1.15      matt #ifdef PMAP_FAULTINFO
   1727  1.15      matt 	curpcb->pcb_faultinfo.pfi_faultaddr = 0;
   1728  1.15      matt 	curpcb->pcb_faultinfo.pfi_repeats = 0;
   1729  1.60     skrll 	curpcb->pcb_faultinfo.pfi_faultptep = NULL;
   1730  1.15      matt #endif
   1731   1.1  christos 	kpreempt_enable();
   1732  1.15      matt 
   1733  1.15      matt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1734  1.48        ad 	return false;
   1735   1.1  christos }
   1736   1.1  christos 
   1737   1.1  christos /*
   1738   1.1  christos  *	Routine:	pmap_unwire
   1739   1.1  christos  *	Function:	Clear the wired attribute for a map/virtual-address
   1740   1.1  christos  *			pair.
   1741   1.1  christos  *	In/out conditions:
   1742   1.1  christos  *			The mapping must already exist in the pmap.
   1743   1.1  christos  */
   1744   1.1  christos void
   1745   1.1  christos pmap_unwire(pmap_t pmap, vaddr_t va)
   1746   1.1  christos {
   1747  1.49     skrll 	UVMHIST_FUNC(__func__);
   1748  1.49     skrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx)", (uintptr_t)pmap, va,
   1749  1.37  pgoyette 	    0, 0);
   1750   1.1  christos 	PMAP_COUNT(unwire);
   1751   1.1  christos 
   1752   1.1  christos 	/*
   1753   1.1  christos 	 * Don't need to flush the TLB since PG_WIRED is only in software.
   1754   1.1  christos 	 */
   1755   1.1  christos 	kpreempt_disable();
   1756  1.15      matt 	pmap_addr_range_check(pmap, va, va, __func__);
   1757   1.1  christos 	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1758  1.15      matt 	KASSERTMSG(ptep != NULL, "pmap %p va %#"PRIxVADDR" invalid STE",
   1759  1.15      matt 	    pmap, va);
   1760  1.15      matt 	pt_entry_t pte = *ptep;
   1761  1.15      matt 	KASSERTMSG(pte_valid_p(pte),
   1762  1.66     skrll 	    "pmap %p va %#" PRIxVADDR " invalid PTE %#" PRIxPTE " @ %p",
   1763  1.15      matt 	    pmap, va, pte_value(pte), ptep);
   1764   1.1  christos 
   1765  1.15      matt 	if (pte_wired_p(pte)) {
   1766  1.46   thorpej 		pmap_tlb_miss_lock_enter();
   1767  1.35     skrll 		pte_set(ptep, pte_unwire_entry(pte));
   1768  1.46   thorpej 		pmap_tlb_miss_lock_exit();
   1769   1.1  christos 		pmap->pm_stats.wired_count--;
   1770   1.1  christos 	}
   1771   1.1  christos #ifdef DIAGNOSTIC
   1772   1.1  christos 	else {
   1773   1.1  christos 		printf("%s: wiring for pmap %p va %#"PRIxVADDR" unchanged!\n",
   1774   1.1  christos 		    __func__, pmap, va);
   1775   1.1  christos 	}
   1776   1.1  christos #endif
   1777   1.1  christos 	kpreempt_enable();
   1778  1.15      matt 
   1779  1.15      matt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   1780   1.1  christos }
   1781   1.1  christos 
   1782   1.1  christos /*
   1783   1.1  christos  *	Routine:	pmap_extract
   1784   1.1  christos  *	Function:
   1785   1.1  christos  *		Extract the physical page address associated
   1786   1.1  christos  *		with the given map/virtual_address pair.
   1787   1.1  christos  */
   1788   1.1  christos bool
   1789   1.1  christos pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
   1790   1.1  christos {
   1791   1.1  christos 	paddr_t pa;
   1792   1.1  christos 
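                         	/*
                         	 * Direct-mapped kernel addresses are translated without a
                         	 * PTE lookup; I/O addresses here are a bug (panic).
                         	 */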
   1793   1.1  christos 	if (pmap == pmap_kernel()) {
   1794   1.1  christos 		if (pmap_md_direct_mapped_vaddr_p(va)) {
   1795   1.1  christos 			pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   1796   1.1  christos 			goto done;
   1797   1.1  christos 		}
   1798   1.1  christos 		if (pmap_md_io_vaddr_p(va))
   1799   1.1  christos 			panic("pmap_extract: io address %#"PRIxVADDR"", va);
   1800  1.15      matt 
   1801  1.15      matt 		if (va >= pmap_limits.virtual_end)
   1802  1.15      matt 			panic("%s: illegal kernel mapped address %#"PRIxVADDR,
   1803  1.15      matt 			    __func__, va);
   1804   1.1  christos 	}
   1805   1.1  christos 	kpreempt_disable();
   1806  1.15      matt 	const pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1807  1.15      matt 	if (ptep == NULL || !pte_valid_p(*ptep)) {
   1808   1.1  christos 		kpreempt_enable();
   1809   1.1  christos 		return false;
   1810   1.1  christos 	}
   1811   1.1  christos 	pa = pte_to_paddr(*ptep) | (va & PGOFSET);
   1812   1.1  christos 	kpreempt_enable();
   1813   1.1  christos done:
   1814   1.1  christos 	if (pap != NULL) {
   1815   1.1  christos 		*pap = pa;
   1816   1.1  christos 	}
   1817   1.1  christos 	return true;
   1818   1.1  christos }
   1819   1.1  christos 
   1820   1.1  christos /*
   1821   1.1  christos  *	Copy the range specified by src_addr/len
   1822   1.1  christos  *	from the source map to the range dst_addr/len
   1823   1.1  christos  *	in the destination map.
   1824   1.1  christos  *
   1825   1.1  christos  *	This routine is only advisory and need not do anything.
   1826   1.1  christos  */
   1827   1.1  christos void
   1828   1.1  christos pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
   1829   1.1  christos     vaddr_t src_addr)
   1830   1.1  christos {
   1831  1.49     skrll 	UVMHIST_FUNC(__func__);
    1832  1.79     skrll 	UVMHIST_CALLARGS(pmaphist, "(dpm=%#jx spm=%#jx dva=%#jx sva=%#jx",
   1833  1.80     skrll 	    (uintptr_t)dst_pmap, (uintptr_t)src_pmap, dst_addr, src_addr);
   1834  1.79     skrll 	UVMHIST_LOG(pmaphist, "... len=%#jx)", len, 0, 0, 0);
   1835   1.1  christos 	PMAP_COUNT(copy);
   1836   1.1  christos }
   1837   1.1  christos 
   1838   1.1  christos /*
   1839   1.1  christos  *	pmap_clear_reference:
   1840   1.1  christos  *
   1841   1.1  christos  *	Clear the reference bit on the specified physical page.
   1842   1.1  christos  */
   1843   1.1  christos bool
   1844   1.1  christos pmap_clear_reference(struct vm_page *pg)
   1845   1.1  christos {
   1846   1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1847   1.1  christos 
   1848  1.49     skrll 	UVMHIST_FUNC(__func__);
   1849  1.49     skrll 	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx))",
   1850  1.37  pgoyette 	   (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);
   1851   1.1  christos 
   1852   1.1  christos 	bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);
   1853   1.1  christos 
   1854  1.37  pgoyette 	UVMHIST_LOG(pmaphist, " <-- wasref %ju", rv, 0, 0, 0);
   1855   1.1  christos 
   1856   1.1  christos 	return rv;
   1857   1.1  christos }
   1858   1.1  christos 
   1859   1.1  christos /*
   1860   1.1  christos  *	pmap_is_referenced:
   1861   1.1  christos  *
   1862   1.1  christos  *	Return whether or not the specified physical page is referenced
   1863   1.1  christos  *	by any physical maps.
   1864   1.1  christos  */
   1865   1.1  christos bool
   1866   1.1  christos pmap_is_referenced(struct vm_page *pg)
   1867   1.1  christos {
   1868   1.1  christos 	return VM_PAGEMD_REFERENCED_P(VM_PAGE_TO_MD(pg));
   1869   1.1  christos }
   1870   1.1  christos 
   1871   1.1  christos /*
   1872   1.1  christos  *	Clear the modify bits on the specified physical page.
   1873   1.1  christos  */
   1874   1.1  christos bool
   1875   1.1  christos pmap_clear_modify(struct vm_page *pg)
   1876   1.1  christos {
   1877   1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1878   1.1  christos 	pv_entry_t pv = &mdpg->mdpg_first;
   1879   1.1  christos 	pv_entry_t pv_next;
   1880   1.1  christos 
   1881  1.49     skrll 	UVMHIST_FUNC(__func__);
   1882  1.49     skrll 	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (%#jx))",
   1883  1.37  pgoyette 	    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);
   1884   1.1  christos 	PMAP_COUNT(clear_modify);
   1885   1.1  christos 
   1886   1.1  christos 	if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
   1887   1.1  christos 		if (pv->pv_pmap == NULL) {
   1888   1.1  christos 			UVMHIST_LOG(pmapexechist,
   1889  1.37  pgoyette 			    "pg %#jx (pa %#jx): execpage cleared",
   1890  1.37  pgoyette 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
   1891   1.1  christos 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1892   1.1  christos 			PMAP_COUNT(exec_uncached_clear_modify);
   1893   1.1  christos 		} else {
   1894   1.1  christos 			UVMHIST_LOG(pmapexechist,
   1895  1.37  pgoyette 			    "pg %#jx (pa %#jx): syncicache performed",
   1896  1.37  pgoyette 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
   1897   1.1  christos 			pmap_page_syncicache(pg);
   1898   1.1  christos 			PMAP_COUNT(exec_synced_clear_modify);
   1899   1.1  christos 		}
   1900   1.1  christos 	}
   1901   1.1  christos 	if (!pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED)) {
   1902  1.15      matt 		UVMHIST_LOG(pmaphist, " <-- false", 0, 0, 0, 0);
   1903   1.1  christos 		return false;
   1904   1.1  christos 	}
   1905   1.1  christos 	if (pv->pv_pmap == NULL) {
   1906  1.15      matt 		UVMHIST_LOG(pmaphist, " <-- true (no mappings)", 0, 0, 0, 0);
   1907   1.1  christos 		return true;
   1908   1.1  christos 	}
   1909   1.1  christos 
   1910   1.1  christos 	/*
    1911   1.1  christos 	 * Remove write access from any pages that are dirty so we
    1912   1.1  christos 	 * can tell if they are written to again later.  Flush the
    1913   1.1  christos 	 * VAC first if there is one.
   1914   1.1  christos 	 */
   1915   1.1  christos 	kpreempt_disable();
   1916  1.15      matt 	VM_PAGEMD_PVLIST_READLOCK(mdpg);
   1917  1.15      matt 	pmap_pvlist_check(mdpg);
   1918   1.1  christos 	for (; pv != NULL; pv = pv_next) {
   1919   1.1  christos 		pmap_t pmap = pv->pv_pmap;
   1920  1.15      matt 		vaddr_t va = trunc_page(pv->pv_va);
   1921  1.15      matt 
   1922  1.15      matt 		pv_next = pv->pv_next;
   1923  1.15      matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1924  1.42     skrll 		if (PV_ISKENTER_P(pv))
   1925  1.15      matt 			continue;
   1926  1.15      matt #endif
   1927   1.1  christos 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1928   1.1  christos 		KASSERT(ptep);
   1929  1.15      matt 		pt_entry_t pte = pte_prot_nowrite(*ptep);
   1930  1.15      matt 		if (*ptep == pte) {
   1931   1.1  christos 			continue;
   1932   1.1  christos 		}
   1933  1.15      matt 		KASSERT(pte_valid_p(pte));
   1934  1.15      matt 		const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1935  1.46   thorpej 		pmap_tlb_miss_lock_enter();
   1936  1.35     skrll 		pte_set(ptep, pte);
   1937   1.1  christos 		pmap_tlb_invalidate_addr(pmap, va);
   1938  1.46   thorpej 		pmap_tlb_miss_lock_exit();
   1939   1.1  christos 		pmap_update(pmap);
   1940  1.15      matt 		if (__predict_false(gen != VM_PAGEMD_PVLIST_READLOCK(mdpg))) {
   1941   1.1  christos 			/*
   1942   1.1  christos 			 * The list changed!  So restart from the beginning.
   1943   1.1  christos 			 */
   1944   1.1  christos 			pv_next = &mdpg->mdpg_first;
   1945  1.15      matt 			pmap_pvlist_check(mdpg);
   1946   1.1  christos 		}
   1947   1.1  christos 	}
   1948  1.15      matt 	pmap_pvlist_check(mdpg);
   1949   1.1  christos 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1950   1.1  christos 	kpreempt_enable();
   1951   1.1  christos 
   1952  1.15      matt 	UVMHIST_LOG(pmaphist, " <-- true (mappings changed)", 0, 0, 0, 0);
   1953   1.1  christos 	return true;
   1954   1.1  christos }
   1955   1.1  christos 
   1956   1.1  christos /*
   1957   1.1  christos  *	pmap_is_modified:
   1958   1.1  christos  *
   1959   1.1  christos  *	Return whether or not the specified physical page is modified
   1960   1.1  christos  *	by any physical maps.
   1961   1.1  christos  */
   1962   1.1  christos bool
   1963   1.1  christos pmap_is_modified(struct vm_page *pg)
   1964   1.1  christos {
   1965   1.1  christos 	return VM_PAGEMD_MODIFIED_P(VM_PAGE_TO_MD(pg));
   1966   1.1  christos }
   1967   1.1  christos 
   1968   1.1  christos /*
   1969   1.1  christos  *	pmap_set_modified:
   1970   1.1  christos  *
   1971   1.1  christos  *	Sets the page modified reference bit for the specified page.
   1972   1.1  christos  */
   1973   1.1  christos void
   1974   1.1  christos pmap_set_modified(paddr_t pa)
   1975   1.1  christos {
   1976   1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1977   1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1978  1.73     skrll 	pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED | VM_PAGEMD_REFERENCED);
   1979   1.1  christos }
   1980   1.1  christos 
   1981   1.1  christos /******************** pv_entry management ********************/
   1982   1.1  christos 
   1983   1.1  christos static void
   1984  1.15      matt pmap_pvlist_check(struct vm_page_md *mdpg)
   1985   1.1  christos {
   1986  1.15      matt #ifdef DEBUG
   1987  1.15      matt 	pv_entry_t pv = &mdpg->mdpg_first;
   1988   1.1  christos 	if (pv->pv_pmap != NULL) {
   1989  1.15      matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1990  1.15      matt 		const u_int colormask = uvmexp.colormask;
   1991  1.15      matt 		u_int colors = 0;
   1992  1.15      matt #endif
   1993   1.1  christos 		for (; pv != NULL; pv = pv->pv_next) {
   1994  1.15      matt 			KASSERT(pv->pv_pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(pv->pv_va));
   1995  1.15      matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   1996  1.15      matt 			colors |= __BIT(atop(pv->pv_va) & colormask);
   1997  1.15      matt #endif
   1998   1.1  christos 		}
   1999  1.15      matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   2000  1.30     skrll 		// Assert that if there is more than 1 color mapped, that the
   2001  1.30     skrll 		// page is uncached.
   2002  1.15      matt 		KASSERTMSG(!pmap_md_virtual_cache_aliasing_p()
   2003  1.15      matt 		    || colors == 0 || (colors & (colors-1)) == 0
   2004  1.15      matt 		    || VM_PAGEMD_UNCACHED_P(mdpg), "colors=%#x uncached=%u",
   2005  1.15      matt 		    colors, VM_PAGEMD_UNCACHED_P(mdpg));
   2006  1.15      matt #endif
   2007  1.34     skrll 	} else {
   2008  1.67     skrll 		KASSERT(pv->pv_next == NULL);
   2009   1.1  christos 	}
   2010  1.15      matt #endif /* DEBUG */
   2011   1.1  christos }
   2012   1.1  christos 
   2013   1.1  christos /*
   2014   1.1  christos  * Enter the pmap and virtual address into the
    2015   1.1  christos  * physical-to-virtual map table.
   2016   1.1  christos  */
   2017   1.1  christos void
   2018  1.58     skrll pmap_enter_pv(pmap_t pmap, vaddr_t va, paddr_t pa, struct vm_page_md *mdpg,
   2019  1.58     skrll     pt_entry_t *nptep, u_int flags)
   2020   1.1  christos {
   2021   1.1  christos 	pv_entry_t pv, npv, apv;
   2022  1.15      matt #ifdef UVMHIST
   2023  1.15      matt 	bool first = false;
   2024  1.58     skrll 	struct vm_page *pg = VM_PAGEMD_VMPAGE_P(mdpg) ? VM_MD_TO_PAGE(mdpg) :
   2025  1.58     skrll 	    NULL;
   2026  1.15      matt #endif
   2027   1.1  christos 
   2028  1.49     skrll 	UVMHIST_FUNC(__func__);
   2029  1.49     skrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx va=%#jx pg=%#jx (%#jx)",
   2030  1.58     skrll 	    (uintptr_t)pmap, va, (uintptr_t)pg, pa);
   2031  1.37  pgoyette 	UVMHIST_LOG(pmaphist, "nptep=%#jx (%#jx))",
   2032  1.37  pgoyette 	    (uintptr_t)nptep, pte_value(*nptep), 0, 0);
   2033   1.1  christos 
   2034   1.1  christos 	KASSERT(kpreempt_disabled());
   2035   1.1  christos 	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
   2036  1.15      matt 	KASSERTMSG(pmap != pmap_kernel() || !pmap_md_io_vaddr_p(va),
   2037  1.15      matt 	    "va %#"PRIxVADDR, va);
   2038   1.1  christos 
   2039   1.1  christos 	apv = NULL;
   2040  1.15      matt 	VM_PAGEMD_PVLIST_LOCK(mdpg);
   2041  1.15      matt again:
   2042   1.1  christos 	pv = &mdpg->mdpg_first;
   2043  1.15      matt 	pmap_pvlist_check(mdpg);
   2044   1.1  christos 	if (pv->pv_pmap == NULL) {
   2045   1.1  christos 		KASSERT(pv->pv_next == NULL);
   2046   1.1  christos 		/*
   2047   1.1  christos 		 * No entries yet, use header as the first entry
   2048   1.1  christos 		 */
   2049   1.1  christos 		PMAP_COUNT(primary_mappings);
   2050   1.1  christos 		PMAP_COUNT(mappings);
   2051  1.15      matt #ifdef UVMHIST
   2052   1.1  christos 		first = true;
   2053  1.15      matt #endif
   2054  1.15      matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   2055  1.15      matt 		KASSERT(VM_PAGEMD_CACHED_P(mdpg));
    2056  1.15      matt 		// If the new mapping has an incompatible color with the
    2057  1.15      matt 		// last mapping of this page, clean the page before using it.
   2058  1.15      matt 		if (!PMAP_PAGE_COLOROK_P(va, pv->pv_va)) {
   2059  1.58     skrll 			pmap_md_vca_clean(mdpg, PMAP_WBINV);
   2060  1.15      matt 		}
   2061   1.1  christos #endif
   2062   1.1  christos 		pv->pv_pmap = pmap;
   2063  1.15      matt 		pv->pv_va = va | flags;
   2064   1.1  christos 	} else {
   2065  1.15      matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   2066  1.58     skrll 		if (pmap_md_vca_add(mdpg, va, nptep)) {
   2067   1.1  christos 			goto again;
   2068  1.15      matt 		}
   2069  1.15      matt #endif
   2070   1.1  christos 
   2071   1.1  christos 		/*
   2072   1.1  christos 		 * There is at least one other VA mapping this page.
   2073   1.1  christos 		 * Place this entry after the header.
   2074   1.1  christos 		 *
   2075   1.1  christos 		 * Note: the entry may already be in the table if
   2076   1.1  christos 		 * we are only changing the protection bits.
   2077   1.1  christos 		 */
   2078   1.1  christos 
   2079   1.1  christos 		for (npv = pv; npv; npv = npv->pv_next) {
   2080  1.15      matt 			if (pmap == npv->pv_pmap
   2081  1.15      matt 			    && va == trunc_page(npv->pv_va)) {
   2082   1.1  christos #ifdef PARANOIADIAG
   2083   1.1  christos 				pt_entry_t *ptep = pmap_pte_lookup(pmap, va);
   2084  1.15      matt 				pt_entry_t pte = (ptep != NULL) ? *ptep : 0;
   2085  1.15      matt 				if (!pte_valid_p(pte) || pte_to_paddr(pte) != pa)
   2086  1.15      matt 					printf("%s: found va %#"PRIxVADDR
   2087  1.15      matt 					    " pa %#"PRIxPADDR
   2088  1.15      matt 					    " in pv_table but != %#"PRIxPTE"\n",
   2089  1.15      matt 					    __func__, va, pa, pte_value(pte));
   2090   1.1  christos #endif
   2091   1.1  christos 				PMAP_COUNT(remappings);
   2092   1.1  christos 				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   2093   1.1  christos 				if (__predict_false(apv != NULL))
   2094   1.1  christos 					pmap_pv_free(apv);
   2095  1.15      matt 
   2096  1.37  pgoyette 				UVMHIST_LOG(pmaphist,
   2097  1.37  pgoyette 				    " <-- done pv=%#jx (reused)",
   2098  1.37  pgoyette 				    (uintptr_t)pv, 0, 0, 0);
   2099   1.1  christos 				return;
   2100   1.1  christos 			}
   2101   1.1  christos 		}
   2102   1.1  christos 		if (__predict_true(apv == NULL)) {
   2103   1.1  christos 			/*
    2104   1.1  christos 			 * To allocate a PV we have to release the PVLIST
    2105   1.1  christos 			 * lock, so record the list generation first.  Then
    2106  1.15      matt 			 * allocate the PV and reacquire the lock.
   2107   1.1  christos 			 */
   2108  1.15      matt 			pmap_pvlist_check(mdpg);
   2109  1.15      matt 			const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   2110   1.1  christos 
   2111   1.1  christos 			apv = (pv_entry_t)pmap_pv_alloc();
   2112   1.1  christos 			if (apv == NULL)
   2113   1.1  christos 				panic("pmap_enter_pv: pmap_pv_alloc() failed");
   2114   1.1  christos 
   2115   1.1  christos 			/*
   2116   1.1  christos 			 * If the generation has changed, then someone else
   2117  1.15      matt 			 * tinkered with this page so we should start over.
   2118   1.1  christos 			 */
   2119  1.15      matt 			if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg))
   2120   1.1  christos 				goto again;
   2121   1.1  christos 		}
   2122   1.1  christos 		npv = apv;
   2123   1.1  christos 		apv = NULL;
   2124  1.15      matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   2125  1.15      matt 		/*
    2126  1.15      matt 		 * If we need to deal with virtual cache aliases, keep
    2127  1.15      matt 		 * mappings in the kernel pmap at the head of the list.
    2128  1.15      matt 		 * This allows the VCA code to easily use them for cache
    2129  1.15      matt 		 * operations if present.
   2130  1.15      matt 		 */
   2131  1.15      matt 		pmap_t kpmap = pmap_kernel();
   2132  1.15      matt 		if (pmap != kpmap) {
   2133  1.15      matt 			while (pv->pv_pmap == kpmap && pv->pv_next != NULL) {
   2134  1.15      matt 				pv = pv->pv_next;
   2135  1.15      matt 			}
   2136  1.15      matt 		}
   2137  1.15      matt #endif
   2138  1.15      matt 		npv->pv_va = va | flags;
   2139   1.1  christos 		npv->pv_pmap = pmap;
   2140   1.1  christos 		npv->pv_next = pv->pv_next;
   2141   1.1  christos 		pv->pv_next = npv;
   2142   1.1  christos 		PMAP_COUNT(mappings);
   2143   1.1  christos 	}
   2144  1.15      matt 	pmap_pvlist_check(mdpg);
   2145   1.1  christos 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   2146   1.1  christos 	if (__predict_false(apv != NULL))
   2147   1.1  christos 		pmap_pv_free(apv);
   2148   1.1  christos 
   2149  1.37  pgoyette 	UVMHIST_LOG(pmaphist, " <-- done pv=%#jx (first %ju)", (uintptr_t)pv,
   2150  1.37  pgoyette 	    first, 0, 0);
   2151   1.1  christos }
   2152   1.1  christos 
   2153   1.1  christos /*
   2154   1.1  christos  * Remove a physical to virtual address translation.
   2155   1.1  christos  * If cache was inhibited on this page, and there are no more cache
   2156   1.1  christos  * conflicts, restore caching.
    2157   1.1  christos  * Flush the cache if the last mapping is removed (the page should
    2158   1.1  christos  * always be cached at this point).
   2159   1.1  christos  */
   2160   1.1  christos void
   2161   1.1  christos pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty)
   2162   1.1  christos {
   2163   1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   2164   1.1  christos 	pv_entry_t pv, npv;
   2165   1.1  christos 	bool last;
   2166   1.1  christos 
   2167  1.49     skrll 	UVMHIST_FUNC(__func__);
   2168  1.49     skrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx, pg=%#jx (pa %#jx)",
   2169  1.37  pgoyette 	    (uintptr_t)pmap, va, (uintptr_t)pg, VM_PAGE_TO_PHYS(pg));
   2170  1.37  pgoyette 	UVMHIST_LOG(pmaphist, "dirty=%ju)", dirty, 0, 0, 0);
   2171   1.1  christos 
   2172   1.1  christos 	KASSERT(kpreempt_disabled());
   2173  1.15      matt 	KASSERT((va & PAGE_MASK) == 0);
   2174   1.1  christos 	pv = &mdpg->mdpg_first;
   2175   1.1  christos 
   2176  1.15      matt 	VM_PAGEMD_PVLIST_LOCK(mdpg);
   2177  1.15      matt 	pmap_pvlist_check(mdpg);
   2178   1.1  christos 
   2179   1.1  christos 	/*
   2180   1.1  christos 	 * If it is the first entry on the list, it is actually
   2181   1.1  christos 	 * in the header and we must copy the following entry up
   2182   1.1  christos 	 * to the header.  Otherwise we must search the list for
   2183   1.1  christos 	 * the entry.  In either case we free the now unused entry.
   2184   1.1  christos 	 */
   2185   1.1  christos 
   2186   1.1  christos 	last = false;
   2187  1.15      matt 	if (pmap == pv->pv_pmap && va == trunc_page(pv->pv_va)) {
   2188   1.1  christos 		npv = pv->pv_next;
   2189   1.1  christos 		if (npv) {
   2190   1.1  christos 			*pv = *npv;
   2191   1.1  christos 			KASSERT(pv->pv_pmap != NULL);
   2192   1.1  christos 		} else {
   2193  1.15      matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   2194  1.15      matt 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
   2195   1.1  christos #endif
   2196   1.1  christos 			pv->pv_pmap = NULL;
   2197   1.1  christos 			last = true;	/* Last mapping removed */
   2198   1.1  christos 		}
   2199   1.1  christos 		PMAP_COUNT(remove_pvfirst);
   2200   1.1  christos 	} else {
   2201   1.1  christos 		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
   2202   1.1  christos 			PMAP_COUNT(remove_pvsearch);
   2203  1.15      matt 			if (pmap == npv->pv_pmap && va == trunc_page(npv->pv_va))
   2204   1.1  christos 				break;
   2205   1.1  christos 		}
   2206   1.1  christos 		if (npv) {
   2207   1.1  christos 			pv->pv_next = npv->pv_next;
   2208   1.1  christos 		}
   2209   1.1  christos 	}
   2210   1.1  christos 
   2211  1.15      matt 	pmap_pvlist_check(mdpg);
   2212   1.1  christos 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   2213   1.1  christos 
   2214  1.15      matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   2215  1.15      matt 	pmap_md_vca_remove(pg, va, dirty, last);
   2216  1.15      matt #endif
   2217  1.15      matt 
   2218   1.1  christos 	/*
   2219   1.1  christos 	 * Free the pv_entry if needed.
   2220   1.1  christos 	 */
   2221   1.1  christos 	if (npv)
   2222   1.1  christos 		pmap_pv_free(npv);
   2223   1.1  christos 	if (VM_PAGEMD_EXECPAGE_P(mdpg) && dirty) {
   2224   1.1  christos 		if (last) {
   2225   1.1  christos 			/*
    2226   1.1  christos 			 * If this was the page's last mapping, we no longer
    2227   1.1  christos 			 * need to track whether it is executable.
   2228   1.1  christos 			 */
   2229   1.1  christos 			UVMHIST_LOG(pmapexechist,
    2230  1.37  pgoyette 			    "pg %#jx (pa %#jx) last %ju: execpage cleared",
   2231  1.37  pgoyette 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
   2232   1.1  christos 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   2233   1.1  christos 			PMAP_COUNT(exec_uncached_remove);
   2234   1.1  christos 		} else {
   2235   1.1  christos 			/*
   2236   1.1  christos 			 * Someone still has it mapped as an executable page
   2237   1.1  christos 			 * so we must sync it.
   2238   1.1  christos 			 */
   2239   1.1  christos 			UVMHIST_LOG(pmapexechist,
   2240  1.37  pgoyette 			    "pg %#jx (pa %#jx) last %ju: performed syncicache",
   2241  1.37  pgoyette 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
   2242   1.1  christos 			pmap_page_syncicache(pg);
   2243   1.1  christos 			PMAP_COUNT(exec_synced_remove);
   2244   1.1  christos 		}
   2245   1.1  christos 	}
   2246  1.15      matt 
   2247  1.15      matt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
   2248   1.1  christos }
   2249   1.1  christos 
   2250   1.1  christos #if defined(MULTIPROCESSOR)
   2251   1.1  christos struct pmap_pvlist_info {
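                         	/* Sized assuming a lock stride of at least 32 bytes. */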
   2252   1.1  christos 	kmutex_t *pli_locks[PAGE_SIZE / 32];
   2253   1.1  christos 	volatile u_int pli_lock_refs[PAGE_SIZE / 32];
   2254   1.1  christos 	volatile u_int pli_lock_index;
   2255   1.1  christos 	u_int pli_lock_mask;
   2256   1.1  christos } pmap_pvlist_info;
   2257   1.1  christos 
   2258   1.1  christos void
   2259   1.1  christos pmap_pvlist_lock_init(size_t cache_line_size)
   2260   1.1  christos {
   2261   1.1  christos 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
   2262   1.1  christos 	const vaddr_t lock_page = uvm_pageboot_alloc(PAGE_SIZE);
   2263   1.1  christos 	vaddr_t lock_va = lock_page;
   2264   1.1  christos 	if (sizeof(kmutex_t) > cache_line_size) {
   2265   1.1  christos 		cache_line_size = roundup2(sizeof(kmutex_t), cache_line_size);
   2266   1.1  christos 	}
   2267   1.1  christos 	const size_t nlocks = PAGE_SIZE / cache_line_size;
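                         	/*
                         	 * The lock count must be a power of two (asserted below) so
                         	 * that pmap_pvlist_lock_addr() can select a lock with a simple
                         	 * mask instead of a modulo operation.
                         	 */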
   2268   1.1  christos 	KASSERT((nlocks & (nlocks - 1)) == 0);
   2269   1.1  christos 	/*
   2270   1.1  christos 	 * Now divide the page into a number of mutexes, one per cacheline.
   2271   1.1  christos 	 */
   2272   1.1  christos 	for (size_t i = 0; i < nlocks; lock_va += cache_line_size, i++) {
   2273   1.1  christos 		kmutex_t * const lock = (kmutex_t *)lock_va;
   2274  1.15      matt 		mutex_init(lock, MUTEX_DEFAULT, IPL_HIGH);
   2275   1.1  christos 		pli->pli_locks[i] = lock;
   2276   1.1  christos 	}
   2277   1.1  christos 	pli->pli_lock_mask = nlocks - 1;
   2278   1.1  christos }
   2279   1.1  christos 
   2280  1.15      matt kmutex_t *
   2281  1.15      matt pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
   2282   1.1  christos {
   2283   1.1  christos 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
   2284   1.1  christos 	kmutex_t *lock = mdpg->mdpg_lock;
   2285   1.1  christos 
   2286   1.1  christos 	/*
    2287   1.1  christos 	 * Allocate a lock on an as-needed basis.  This will hopefully give us
    2288   1.1  christos 	 * a semi-random distribution that is not based on page color.
   2289   1.1  christos 	 */
   2290   1.1  christos 	if (__predict_false(lock == NULL)) {
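                         		/*
                         		 * The stride of 37 is odd and the lock count is a
                         		 * power of two, so the two are coprime: successive
                         		 * allocations cycle through every lock slot before
                         		 * any slot is reused.
                         		 */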
   2291   1.1  christos 		size_t locknum = atomic_add_int_nv(&pli->pli_lock_index, 37);
   2292   1.1  christos 		size_t lockid = locknum & pli->pli_lock_mask;
   2293   1.1  christos 		kmutex_t * const new_lock = pli->pli_locks[lockid];
   2294   1.1  christos 		/*
   2295   1.1  christos 		 * Set the lock.  If some other thread already did, just use
   2296   1.1  christos 		 * the one they assigned.
   2297   1.1  christos 		 */
   2298   1.1  christos 		lock = atomic_cas_ptr(&mdpg->mdpg_lock, NULL, new_lock);
   2299   1.1  christos 		if (lock == NULL) {
   2300   1.1  christos 			lock = new_lock;
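                         			/* We won the race; count another page using this lock. */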
   2301   1.1  christos 			atomic_inc_uint(&pli->pli_lock_refs[lockid]);
   2302   1.1  christos 		}
   2303   1.1  christos 	}
   2304   1.1  christos 
   2305   1.1  christos 	/*
   2306  1.15      matt 	 * Now finally provide the lock.
   2307   1.1  christos 	 */
   2308  1.15      matt 	return lock;
   2309   1.1  christos }
   2310   1.1  christos #else /* !MULTIPROCESSOR */
   2311   1.1  christos void
   2312   1.1  christos pmap_pvlist_lock_init(size_t cache_line_size)
   2313   1.1  christos {
   2314  1.15      matt 	mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_HIGH);
   2315   1.1  christos }
   2316   1.1  christos 
   2317   1.1  christos #ifdef MODULAR
   2318  1.15      matt kmutex_t *
   2319  1.15      matt pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
   2320   1.1  christos {
   2321   1.1  christos 	/*
   2322   1.1  christos 	 * We just use a global lock.
   2323   1.1  christos 	 */
   2324   1.1  christos 	if (__predict_false(mdpg->mdpg_lock == NULL)) {
   2325   1.1  christos 		mdpg->mdpg_lock = &pmap_pvlist_mutex;
   2326   1.1  christos 	}
   2327   1.1  christos 
   2328   1.1  christos 	/*
   2329  1.15      matt 	 * Now finally provide the lock.
   2330   1.1  christos 	 */
   2331  1.15      matt 	return mdpg->mdpg_lock;
   2332   1.1  christos }
   2333   1.1  christos #endif /* MODULAR */
   2334   1.1  christos #endif /* !MULTIPROCESSOR */
   2335   1.1  christos 
   2336   1.1  christos /*
   2337   1.1  christos  * pmap_pv_page_alloc:
   2338   1.1  christos  *
   2339   1.1  christos  *	Allocate a page for the pv_entry pool.
   2340   1.1  christos  */
   2341   1.1  christos void *
   2342   1.1  christos pmap_pv_page_alloc(struct pool *pp, int flags)
   2343   1.1  christos {
   2344  1.69     skrll 	struct vm_page * const pg = pmap_md_alloc_poolpage(UVM_PGA_USERESERVE);
   2345   1.1  christos 	if (pg == NULL)
   2346   1.1  christos 		return NULL;
   2347   1.1  christos 
   2348  1.69     skrll 	return (void *)pmap_md_map_poolpage(VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
   2349   1.1  christos }
   2350   1.1  christos 
   2351   1.1  christos /*
   2352   1.1  christos  * pmap_pv_page_free:
   2353   1.1  christos  *
   2354   1.1  christos  *	Free a pv_entry pool page.
   2355   1.1  christos  */
   2356   1.1  christos void
   2357   1.1  christos pmap_pv_page_free(struct pool *pp, void *v)
   2358   1.1  christos {
   2359   1.1  christos 	vaddr_t va = (vaddr_t)v;
   2360   1.1  christos 
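                         	/*
                         	 * Pool pages come from the direct map (see pmap_pv_page_alloc()
                         	 * above), so the physical address can be recovered without a
                         	 * page table walk.
                         	 */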
   2361   1.1  christos 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
   2362   1.1  christos 	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   2363   1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   2364  1.15      matt 	KASSERT(pg != NULL);
   2365  1.15      matt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   2366  1.15      matt 	kpreempt_disable();
   2367  1.15      matt 	pmap_md_vca_remove(pg, va, true, true);
   2368  1.15      matt 	kpreempt_enable();
   2369  1.15      matt #endif
   2370  1.15      matt 	pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
   2371  1.34     skrll 	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg)));
   2372   1.1  christos 	uvm_pagefree(pg);
   2373   1.1  christos }
   2374   1.1  christos 
   2375   1.1  christos #ifdef PMAP_PREFER
   2376   1.1  christos /*
   2377   1.1  christos  * Find first virtual address >= *vap that doesn't cause
   2378   1.1  christos  * a cache alias conflict.
   2379   1.1  christos  */
   2380   1.1  christos void
   2381   1.1  christos pmap_prefer(vaddr_t foff, vaddr_t *vap, vsize_t sz, int td)
   2382   1.1  christos {
   2383   1.1  christos 	vsize_t prefer_mask = ptoa(uvmexp.colormask);
   2384   1.1  christos 
   2385   1.1  christos 	PMAP_COUNT(prefer_requests);
   2386   1.1  christos 
   2387   1.1  christos 	prefer_mask |= pmap_md_cache_prefer_mask();
   2388   1.1  christos 
   2389   1.1  christos 	if (prefer_mask) {
   2390  1.15      matt 		vaddr_t	va = *vap;
   2391  1.15      matt 		vsize_t d = (foff - va) & prefer_mask;
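                         		/*
                         		 * d is the forward distance from va to the next address
                         		 * whose alias color matches foff.  For example (values
                         		 * illustrative only): with prefer_mask == 0xffff,
                         		 * va == 0x20000 and foff == 0x3000, d == 0x3000, and
                         		 * va + d == 0x23000 shares foff's color, so the mapping
                         		 * avoids a virtual cache alias.
                         		 */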
   2392   1.1  christos 		if (d) {
   2393   1.1  christos 			if (td)
   2394  1.15      matt 				*vap = trunc_page(va - ((-d) & prefer_mask));
   2395   1.1  christos 			else
   2396   1.1  christos 				*vap = round_page(va + d);
   2397   1.1  christos 			PMAP_COUNT(prefer_adjustments);
   2398   1.1  christos 		}
   2399   1.1  christos 	}
   2400   1.1  christos }
   2401   1.1  christos #endif /* PMAP_PREFER */
   2402   1.1  christos 
   2403   1.1  christos #ifdef PMAP_MAP_POOLPAGE
   2404   1.1  christos vaddr_t
   2405   1.1  christos pmap_map_poolpage(paddr_t pa)
   2406   1.1  christos {
   2407   1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   2408   1.1  christos 	KASSERT(pg);
   2409  1.34     skrll 
   2410   1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   2411  1.34     skrll 	KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
   2412  1.34     skrll 
   2413   1.1  christos 	pmap_page_set_attributes(mdpg, VM_PAGEMD_POOLPAGE);
   2414   1.1  christos 
   2415  1.15      matt 	return pmap_md_map_poolpage(pa, NBPG);
   2416   1.1  christos }
   2417   1.1  christos 
   2418   1.1  christos paddr_t
   2419   1.1  christos pmap_unmap_poolpage(vaddr_t va)
   2420   1.1  christos {
   2421   1.1  christos 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
   2422   1.1  christos 	paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   2423   1.1  christos 
   2424   1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   2425  1.15      matt 	KASSERT(pg != NULL);
   2426  1.34     skrll 	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg)));
   2427  1.34     skrll 
   2428  1.15      matt 	pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
   2429   1.1  christos 	pmap_md_unmap_poolpage(va, NBPG);
   2430   1.1  christos 
   2431   1.1  christos 	return pa;
   2432   1.1  christos }
   2433   1.1  christos #endif /* PMAP_MAP_POOLPAGE */
   2434  1.69     skrll 
   2435  1.69     skrll #ifdef DDB
   2436  1.69     skrll void
   2437  1.69     skrll pmap_db_mdpg_print(struct vm_page *pg, void (*pr)(const char *, ...) __printflike(1, 2))
   2438  1.69     skrll {
   2439  1.69     skrll 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   2440  1.69     skrll 	pv_entry_t pv = &mdpg->mdpg_first;
   2441  1.69     skrll 
   2442  1.69     skrll 	if (pv->pv_pmap == NULL) {
   2443  1.69     skrll 		pr(" no mappings\n");
   2444  1.69     skrll 		return;
   2445  1.69     skrll 	}
   2446  1.69     skrll 
   2447  1.69     skrll 	int lcount = 0;
   2448  1.69     skrll 	if (VM_PAGEMD_VMPAGE_P(mdpg)) {
   2449  1.69     skrll 		pr(" vmpage");
   2450  1.69     skrll 		lcount++;
   2451  1.69     skrll 	}
   2452  1.69     skrll 	if (VM_PAGEMD_POOLPAGE_P(mdpg)) {
   2453  1.69     skrll 		if (lcount != 0)
   2454  1.69     skrll 			pr(",");
   2455  1.69     skrll 		pr(" pool");
   2456  1.69     skrll 		lcount++;
   2457  1.69     skrll 	}
   2458  1.69     skrll #ifdef PMAP_VIRTUAL_CACHE_ALIASES
   2459  1.69     skrll 	if (VM_PAGEMD_UNCACHED_P(mdpg)) {
   2460  1.69     skrll 		if (lcount != 0)
   2461  1.69     skrll 			pr(",");
    2462  1.69     skrll 		pr(" uncached");
   2463  1.69     skrll 	}
   2464  1.69     skrll #endif
   2465  1.69     skrll 	pr("\n");
   2466  1.69     skrll 
   2467  1.69     skrll 	lcount = 0;
   2468  1.69     skrll 	if (VM_PAGEMD_REFERENCED_P(mdpg)) {
    2469  1.69     skrll 		pr(" referenced");
   2470  1.69     skrll 		lcount++;
   2471  1.69     skrll 	}
   2472  1.69     skrll 	if (VM_PAGEMD_MODIFIED_P(mdpg)) {
   2473  1.69     skrll 		if (lcount != 0)
   2474  1.69     skrll 			pr(",");
   2475  1.69     skrll 		pr(" modified");
   2476  1.69     skrll 		lcount++;
   2477  1.69     skrll 	}
   2478  1.69     skrll 	if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
   2479  1.69     skrll 		if (lcount != 0)
   2480  1.69     skrll 			pr(",");
   2481  1.69     skrll 		pr(" exec");
   2482  1.69     skrll 		lcount++;
   2483  1.69     skrll 	}
   2484  1.69     skrll 	pr("\n");
   2485  1.69     skrll 
   2486  1.69     skrll 	for (size_t i = 0; pv != NULL; pv = pv->pv_next) {
   2487  1.69     skrll 		pr("  pv[%zu] pv=%p\n", i, pv);
    2488  1.69     skrll 		pr("    pv[%zu].pv_pmap = %p\n", i, pv->pv_pmap);
   2489  1.69     skrll 		pr("    pv[%zu].pv_va   = %" PRIxVADDR " (kenter=%s)\n",
   2490  1.69     skrll 		    i, trunc_page(pv->pv_va), PV_ISKENTER_P(pv) ? "true" : "false");
   2491  1.69     skrll 		i++;
   2492  1.69     skrll 	}
   2493  1.69     skrll }
   2494  1.69     skrll 
   2495  1.69     skrll void
   2496  1.69     skrll pmap_db_pmap_print(struct pmap *pm,
   2497  1.69     skrll     void (*pr)(const char *, ...) __printflike(1, 2))
   2498  1.69     skrll {
   2499  1.69     skrll #if defined(PMAP_HWPAGEWALKER)
   2500  1.69     skrll 	pr(" pm_pdetab     = %p\n", pm->pm_pdetab);
   2501  1.69     skrll #endif
   2502  1.69     skrll #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
   2503  1.69     skrll 	pr(" pm_segtab     = %p\n", pm->pm_segtab);
   2504  1.69     skrll #endif
   2505  1.69     skrll 
   2506  1.69     skrll 	pmap_db_tlb_print(pm, pr);
   2507  1.69     skrll }
   2508  1.69     skrll #endif /* DDB */
   2509