/*	$NetBSD: pmap.c,v 1.2 2013/07/17 23:15:20 matt Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.2 2013/07/17 23:15:20 matt Exp $");

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduce-protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include "opt_modular.h"
#include "opt_multiprocessor.h"
#include "opt_sysv.h"

#define __PMAP_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/pool.h>
#include <sys/atomic.h>
#include <sys/mutex.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif
#include <sys/socketvar.h>	/* XXX: for sock_loan_thresh */

#include <uvm/uvm.h>

#define	PMAP_COUNT(name)	(pmap_evcnt_##name.ev_count++ + 0)
#define	PMAP_COUNTER(name, desc) \
static struct evcnt pmap_evcnt_##name = \
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \
EVCNT_ATTACH_STATIC(pmap_evcnt_##name)

PMAP_COUNTER(remove_kernel_calls, "remove kernel calls");
PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped");
PMAP_COUNTER(remove_user_calls, "remove user calls");
PMAP_COUNTER(remove_user_pages, "user pages unmapped");
PMAP_COUNTER(remove_flushes, "remove cache flushes");
PMAP_COUNTER(remove_tlb_ops, "remove tlb ops");
PMAP_COUNTER(remove_pvfirst, "remove pv first");
PMAP_COUNTER(remove_pvsearch, "remove pv search");

PMAP_COUNTER(prefer_requests, "prefer requests");
PMAP_COUNTER(prefer_adjustments, "prefer adjustments");

PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
PMAP_COUNTER(zeroed_pages, "pages zeroed");
PMAP_COUNTER(copied_pages, "pages copied");

PMAP_COUNTER(kenter_pa, "kernel fast mapped pages");
PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)");
PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages");
PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages");

PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable");
PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable");

PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)");
PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)");
PMAP_COUNTER(kernel_mappings, "kernel pages mapped");
PMAP_COUNTER(user_mappings, "user pages mapped");
PMAP_COUNTER(user_mappings_changed, "user mapping changed");
PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed");
PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
PMAP_COUNTER(managed_mappings, "managed pages mapped");
PMAP_COUNTER(mappings, "pages mapped");
PMAP_COUNTER(remappings, "pages remapped");
PMAP_COUNTER(unmappings, "pages unmapped");
PMAP_COUNTER(primary_mappings, "page initial mappings");
PMAP_COUNTER(primary_unmappings, "page final unmappings");
PMAP_COUNTER(tlb_hit, "page mapping");

PMAP_COUNTER(exec_mappings, "exec pages mapped");
PMAP_COUNTER(exec_synced_mappings, "exec pages synced");
PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)");
PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)");
PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)");
PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)");
PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)");
PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)");
PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)");
PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)");
PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)");

PMAP_COUNTER(create, "creates");
PMAP_COUNTER(reference, "references");
PMAP_COUNTER(dereference, "dereferences");
PMAP_COUNTER(destroy, "destroyed");
PMAP_COUNTER(activate, "activations");
PMAP_COUNTER(deactivate, "deactivations");
PMAP_COUNTER(update, "updates");
#ifdef MULTIPROCESSOR
PMAP_COUNTER(shootdown_ipis, "shootdown IPIs");
#endif
PMAP_COUNTER(unwire, "unwires");
PMAP_COUNTER(copy, "copies");
PMAP_COUNTER(clear_modify, "clear_modifies");
PMAP_COUNTER(protect, "protects");
PMAP_COUNTER(page_protect, "page_protects");

#define PMAP_ASID_RESERVED 0
CTASSERT(PMAP_ASID_RESERVED == 0);

/*
 * Initialize the kernel pmap.
 */
#ifdef MULTIPROCESSOR
#define	PMAP_SIZE	offsetof(struct pmap, pm_pai[MAXCPUS])
#else
#define	PMAP_SIZE	sizeof(struct pmap)
kmutex_t pmap_pvlist_mutex __aligned(COHERENCY_UNIT);
#endif

struct pmap_kernel kernel_pmap_store = {
	.kernel_pmap = {
		.pm_count = 1,
		.pm_segtab = PMAP_INVALID_SEGTAB_ADDRESS,
		.pm_minaddr = VM_MIN_KERNEL_ADDRESS,
		.pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
	},
};

struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;

struct pmap_limits pmap_limits;

#ifdef UVMHIST
static struct kern_history_ent pmapexechistbuf[10000];
static struct kern_history_ent pmaphistbuf[10000];
#endif

/*
 * The pools from which pmap structures and sub-structures are allocated.
 */
struct pool pmap_pmap_pool;
struct pool pmap_pv_pool;

#ifndef PMAP_PV_LOWAT
#define	PMAP_PV_LOWAT	16
#endif
int		pmap_pv_lowat = PMAP_PV_LOWAT;

bool		pmap_initialized = false;
#define	PMAP_PAGE_COLOROK_P(a, b) \
		((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0)
u_int		pmap_page_colormask;

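/*
 * Worked example (values illustrative; the mask is set up by MD code):
 * with 4KB pages and a 16KB virtually-indexed cache way, a page's color
 * lives in address bits 12-13 and pmap_page_colormask would be 0x3000.
 * Then
 *
 *	PMAP_PAGE_COLOROK_P(0x00005000, 0x7f801000)
 *
 * is true, since both addresses have color 1, while pairing either of
 * them with the color-0 address 0x00004000 would fail the check.
 */
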
#define PAGE_IS_MANAGED(pa)	\
	(pmap_initialized == true && vm_physseg_find(atop(pa), NULL) != -1)

#define PMAP_IS_ACTIVE(pm)						\
	((pm) == pmap_kernel() ||					\
	 (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)

/* Forward function declarations */
void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, u_int *);

/*
 * PV table management functions.
 */
void	*pmap_pv_page_alloc(struct pool *, int);
void	pmap_pv_page_free(struct pool *, void *);

struct pool_allocator pmap_pv_page_allocator = {
	pmap_pv_page_alloc, pmap_pv_page_free, 0,
};

#define	pmap_pv_alloc()		pool_get(&pmap_pv_pool, PR_NOWAIT)
#define	pmap_pv_free(pv)	pool_put(&pmap_pv_pool, (pv))

/*
 * Misc. functions.
 */

bool
pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes)
{
	volatile u_int * const attrp = &mdpg->mdpg_attrs;
#ifdef MULTIPROCESSOR
	for (;;) {
		u_int old_attr = *attrp;
		if ((old_attr & clear_attributes) == 0)
			return false;
		u_int new_attr = old_attr & ~clear_attributes;
		if (old_attr == atomic_cas_uint(attrp, old_attr, new_attr))
			return true;
	}
#else
	u_int old_attr = *attrp;
	if ((old_attr & clear_attributes) == 0)
		return false;
	*attrp &= ~clear_attributes;
	return true;
#endif
}
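
/*
 * Sketch of a typical caller (hypothetical helper, not a function in
 * this file): the return value reports whether any of the requested
 * attributes were actually set, which is exactly the answer a
 * pmap_clear_modify()-style interface needs.
 *
 *	bool
 *	example_clear_modify(struct vm_page *pg)
 *	{
 *		return pmap_page_clear_attributes(VM_PAGE_TO_MD(pg),
 *		    VM_PAGEMD_MODIFIED);
 *	}
 */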

void
pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes)
{
#ifdef MULTIPROCESSOR
	atomic_or_uint(&mdpg->mdpg_attrs, set_attributes);
#else
	mdpg->mdpg_attrs |= set_attributes;
#endif
}

static void
pmap_page_syncicache(struct vm_page *pg)
{
#ifndef MULTIPROCESSOR
	struct pmap * const curpmap = curcpu()->ci_curpm;
#endif
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv = &mdpg->mdpg_first;
	kcpuset_t *onproc;
#ifdef MULTIPROCESSOR
	kcpuset_create(&onproc, true);
#else
	onproc = NULL;	/* stays NULL unless this pmap is on this CPU */
#endif
	(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);

	if (pv->pv_pmap != NULL) {
		for (; pv != NULL; pv = pv->pv_next) {
#ifdef MULTIPROCESSOR
			kcpuset_merge(onproc, pv->pv_pmap->pm_onproc);
			if (kcpuset_match(onproc, kcpuset_running)) {
				break;
			}
#else
			if (pv->pv_pmap == curpmap) {
				onproc = curcpu()->ci_data.cpu_kcpuset;
				break;
			}
#endif
		}
	}
	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	kpreempt_disable();
	pmap_md_page_syncicache(pg, onproc);
#ifdef MULTIPROCESSOR
	kcpuset_destroy(onproc);
#endif
	kpreempt_enable();
}

/*
 * Define the initial bounds of the kernel virtual address space.
 */
void
pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
{

	*vstartp = VM_MIN_KERNEL_ADDRESS;
	*vendp = VM_MAX_KERNEL_ADDRESS;
}

vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
	vaddr_t virtual_end = pmap_limits.virtual_end;
	maxkvaddr = pmap_round_seg(maxkvaddr) - 1;

	/*
	 * Reserve PTEs for the new KVA space.
	 */
	for (; virtual_end < maxkvaddr; virtual_end += NBSEG) {
		pmap_pte_reserve(pmap_kernel(), virtual_end, 0);
	}

	/*
	 * Don't exceed VM_MAX_KERNEL_ADDRESS!
	 */
	if (virtual_end == 0 || virtual_end > VM_MAX_KERNEL_ADDRESS)
		virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Update new end.
	 */
	pmap_limits.virtual_end = virtual_end;
	return virtual_end;
}
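
/*
 * Example (NBSEG value illustrative): on a port with 4MB segments
 * (NBSEG == 0x400000), if pmap_limits.virtual_end currently equals
 * VM_MIN_KERNEL_ADDRESS, then
 *
 *	pmap_growkernel(VM_MIN_KERNEL_ADDRESS + 0x500000);
 *
 * rounds the request up to a segment boundary and calls
 * pmap_pte_reserve() twice, extending the usable KVA end by two
 * segments (clamped to VM_MAX_KERNEL_ADDRESS).
 */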

/*
 * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
 * This function allows for early dynamic memory allocation until the virtual
 * memory system has been bootstrapped.  After that point, either kmem_alloc
 * or malloc should be used.  This function works by stealing pages from the
 * (to be) managed page pool, then implicitly mapping the pages (by using
 * their k0seg addresses) and zeroing them.
 *
 * It may be used once the physical memory segments have been pre-loaded
 * into the vm_physmem[] array.  Early memory allocation MUST use this
 * interface!  This cannot be used after vm_page_startup(), and will
 * generate a panic if tried.
 *
 * Note that this memory will never be freed, and in essence it is wired
 * down.
 *
 * We must adjust *vstartp and/or *vendp iff we use address space
 * from the kernel virtual address range defined by pmap_virtual_space().
 */
vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
	u_int npgs;
	paddr_t pa;
	vaddr_t va;

	size = round_page(size);
	npgs = atop(size);

	for (u_int bank = 0; bank < vm_nphysseg; bank++) {
		struct vm_physseg * const seg = VM_PHYSMEM_PTR(bank);
		if (uvm.page_init_done == true)
			panic("pmap_steal_memory: called _after_ bootstrap");

		if (seg->avail_start != seg->start ||
		    seg->avail_start >= seg->avail_end)
			continue;

		if ((seg->avail_end - seg->avail_start) < npgs)
			continue;

		/*
		 * There are enough pages here; steal them!
		 */
		pa = ptoa(seg->avail_start);
		seg->avail_start += npgs;
		seg->start += npgs;

		/*
		 * Have we used up this segment?
		 */
		if (seg->avail_start == seg->end) {
			if (vm_nphysseg == 1)
				panic("pmap_steal_memory: out of memory!");

			/* Remove this segment from the list. */
			vm_nphysseg--;
			if (bank < vm_nphysseg)
				memmove(seg, seg+1,
				    sizeof(*seg) * (vm_nphysseg - bank));
		}

		va = pmap_md_map_poolpage(pa, size);
		memset((void *)va, 0, size);
		return va;
	}

	/*
	 * If we got here, there was no memory left.
	 */
	panic("pmap_steal_memory: no memory to steal");
}
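
/*
 * Usage sketch (hypothetical MD bootstrap code): stealing wired pages
 * for an early kernel table before uvm_init() runs.
 *
 *	vaddr_t tbl = pmap_steal_memory(round_page(tbl_size), NULL, NULL);
 *
 * The returned memory is zeroed, permanently wired, and usable
 * immediately.  Passing NULL for vstartp/vendp is fine here precisely
 * because this variant returns a direct-mapped address and never
 * adjusts the kernel virtual range.
 */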

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{
	UVMHIST_INIT_STATIC(pmapexechist, pmapexechistbuf);
	UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);

	/*
	 * Initialize the segtab lock.
	 */
	mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH);

	/*
	 * Set a low water mark on the pv_entry pool, so that we are
	 * more likely to have these around even in extreme memory
	 * starvation.
	 */
	pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);

	pmap_md_init();

	/*
	 * Now it is safe to enable pv entry recording.
	 */
	pmap_initialized = true;
}

/*
 *	Create and return a physical map.
 *
 *	The map is an actual physical map and may be referenced by the
 *	hardware.  (The historical Mach interface took a size argument
 *	selecting a software-only map; this implementation takes none,
 *	so every map created here is a real, hardware-visible map.)
 */
pmap_t
pmap_create(void)
{
	pmap_t pmap;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	PMAP_COUNT(create);

	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
	memset(pmap, 0, PMAP_SIZE);

	KASSERT(pmap->pm_pai[0].pai_link.le_prev == NULL);

	pmap->pm_count = 1;
	pmap->pm_minaddr = VM_MIN_ADDRESS;
	pmap->pm_maxaddr = VM_MAXUSER_ADDRESS;

	pmap_segtab_init(pmap);

	UVMHIST_LOG(pmaphist, "<- pmap %p", pmap,0,0,0);
	return pmap;
}
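
/*
 * Lifecycle sketch (hypothetical caller): pmaps are reference counted
 * via pm_count, so every pmap_reference() must be balanced by a
 * pmap_destroy(), and the structure is only freed when the count
 * drops to zero.
 *
 *	pmap_t pm = pmap_create();	// pm_count == 1
 *	pmap_reference(pm);		// pm_count == 2
 *	pmap_destroy(pm);		// pm_count == 1, nothing freed
 *	pmap_destroy(pm);		// pm_count == 0, pool_put() happens
 */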

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0,0,0);

	if (atomic_dec_uint_nv(&pmap->pm_count) > 0) {
		PMAP_COUNT(dereference);
		return;
	}

	KASSERT(pmap->pm_count == 0);
	PMAP_COUNT(destroy);
	kpreempt_disable();
	pmap_tlb_asid_release_all(pmap);
	pmap_segtab_destroy(pmap, NULL, 0);

	pool_put(&pmap_pmap_pool, pmap);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0,0,0);
	PMAP_COUNT(reference);

	if (pmap != NULL) {
		atomic_inc_uint(&pmap->pm_count);
	}

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}

/*
 *	Make a new pmap (vmspace) active for the given process.
 */
void
pmap_activate(struct lwp *l)
{
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(l=%p (pmap=%p))", l, pmap, 0,0);
	PMAP_COUNT(activate);

	kpreempt_disable();
	pmap_tlb_asid_acquire(pmap, l);
	if (l == curlwp) {
		pmap_segtab_activate(pmap, l);
	}
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}

/*
 *	Make a previously active pmap (vmspace) inactive.
 */
void
pmap_deactivate(struct lwp *l)
{
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(l=%p (pmap=%p))", l, pmap, 0,0);
	PMAP_COUNT(deactivate);

	kpreempt_disable();
	curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
	pmap_tlb_asid_deactivate(pmap);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}

void
pmap_update(struct pmap *pmap)
{

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0,0,0);
	PMAP_COUNT(update);

	kpreempt_disable();
#if defined(MULTIPROCESSOR) && defined(PMAP_NEED_TLB_SHOOTDOWN)
	u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0);
	if (pending && pmap_tlb_shootdown_bystanders(pmap))
		PMAP_COUNT(shootdown_ipis);
#endif
#ifdef DEBUG
	pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
#endif /* DEBUG */

	/*
	 * If pmap_remove_all was called, we deactivated ourselves and nuked
	 * our ASID.  Now we have to reactivate ourselves.
	 */
	if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) {
		pmap->pm_flags ^= PMAP_DEFERRED_ACTIVATE;
		pmap_tlb_asid_acquire(pmap, curlwp);
		pmap_segtab_activate(pmap, curlwp);
	}
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */

static bool
pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
	uintptr_t flags)
{
	const pt_entry_t npte = flags;
	const bool is_kernel_pmap_p = (pmap == pmap_kernel());

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%p %sva=%"PRIxVADDR"..%"PRIxVADDR,
	    pmap, (is_kernel_pmap_p ? "(kernel) " : ""), sva, eva);
	UVMHIST_LOG(pmaphist, "ptep=%p, flags(npte)=%#"PRIxPTR")",
	    ptep, flags, 0, 0);

	KASSERT(kpreempt_disabled());

	for (; sva < eva; sva += NBPG, ptep++) {
		pt_entry_t pt_entry = *ptep;
		if (!pte_valid_p(pt_entry))
			continue;
		if (is_kernel_pmap_p)
			PMAP_COUNT(remove_kernel_pages);
		else
			PMAP_COUNT(remove_user_pages);
		if (pte_wired_p(pt_entry))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;
		struct vm_page *pg = PHYS_TO_VM_PAGE(pte_to_paddr(pt_entry));
		if (__predict_true(pg != NULL)) {
			pmap_remove_pv(pmap, sva, pg,
			   pte_modified_p(pt_entry));
		}
		*ptep = npte;
		/*
		 * Flush the TLB for the given address.
		 */
		pmap_tlb_invalidate_addr(pmap, sva);
	}
	return false;
}

void
pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
{
	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
	const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%p, va=%#"PRIxVADDR"..%#"PRIxVADDR")",
	    pmap, sva, eva, 0);

	if (is_kernel_pmap_p)
		PMAP_COUNT(remove_kernel_calls);
	else
		PMAP_COUNT(remove_user_calls);
#ifdef PARANOIADIAG
	if (sva < pmap->pm_minaddr || eva > pmap->pm_maxaddr)
		panic("%s: va range %#"PRIxVADDR"-%#"PRIxVADDR" not in range",
		    __func__, sva, eva - 1);
	if (PMAP_IS_ACTIVE(pmap)) {
		struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu());
		uint32_t asid = tlb_get_asid();
		if (asid != pai->pai_asid) {
			panic("%s: inconsistency for active TLB flush"
			    ": %d <-> %d", __func__, asid, pai->pai_asid);
		}
	}
#endif
	kpreempt_disable();
	pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}

/*
 *	pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv;
	vaddr_t va;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR") prot=%#x)",
	    pg, VM_PAGE_TO_PHYS(pg), prot, 0);
	PMAP_COUNT(page_protect);

	switch (prot) {
	case VM_PROT_READ|VM_PROT_WRITE:
	case VM_PROT_ALL:
		break;

	/* copy_on_write */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
		pv = &mdpg->mdpg_first;
		/*
		 * Loop over all current mappings setting/clearing as appropriate.
		 */
		if (pv->pv_pmap != NULL) {
			while (pv != NULL) {
				const pmap_t pmap = pv->pv_pmap;
				const uint16_t gen = VM_PAGEMD_PVLIST_GEN(mdpg);
				va = pv->pv_va;
				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
				pmap_protect(pmap, va, va + PAGE_SIZE, prot);
				KASSERT(pv->pv_pmap == pmap);
				pmap_update(pmap);
				if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg, false)) {
					pv = &mdpg->mdpg_first;
				} else {
					pv = pv->pv_next;
				}
			}
		}
		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
		break;

	/* remove_all */
	default:
		/*
		 * Do this first so that for each unmapping, pmap_remove_pv
		 * won't try to sync the icache.
		 */
		if (pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE)) {
			UVMHIST_LOG(pmapexechist, "pg %p (pa %#"PRIxPADDR
			    "): execpage cleared", pg, VM_PAGE_TO_PHYS(pg),0,0);
			PMAP_COUNT(exec_uncached_page_protect);
		}
		(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
		pv = &mdpg->mdpg_first;
		while (pv->pv_pmap != NULL) {
			const pmap_t pmap = pv->pv_pmap;
			va = pv->pv_va;
			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
			pmap_remove(pmap, va, va + PAGE_SIZE);
			pmap_update(pmap);
			(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
		}
		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	}

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}
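
/*
 * Typical calls (illustrative): uvm write-protects a page for
 * copy-on-write with
 *
 *	pmap_page_protect(pg, VM_PROT_READ);
 *
 * which takes the read-only arm above and downgrades every existing
 * mapping, while
 *
 *	pmap_page_protect(pg, VM_PROT_NONE);
 *
 * falls into the default (remove-all) arm and unmaps the page from
 * every pmap, e.g. before the page is freed or paged out.
 */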

static bool
pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
	uintptr_t flags)
{
	const vm_prot_t prot = (flags & VM_PROT_ALL);

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pmap=%p %sva=%"PRIxVADDR"..%"PRIxVADDR,
	    pmap, (pmap == pmap_kernel() ? "(kernel) " : ""), sva, eva);
	UVMHIST_LOG(pmaphist, "ptep=%p, flags(npte)=%#"PRIxPTR")",
	    ptep, flags, 0, 0);

	KASSERT(kpreempt_disabled());
	/*
	 * Change protection on every valid mapping within this segment.
	 */
	for (; sva < eva; sva += NBPG, ptep++) {
		pt_entry_t pt_entry = *ptep;
		if (!pte_valid_p(pt_entry))
			continue;
		struct vm_page * const pg =
		    PHYS_TO_VM_PAGE(pte_to_paddr(pt_entry));
		if (pg != NULL && pte_modified_p(pt_entry)) {
			struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
			pmap_md_vca_clean(pg, sva, PMAP_WBINV);
			if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
				KASSERT(mdpg->mdpg_first.pv_pmap != NULL);
				if (pte_cached_p(pt_entry)) {
					UVMHIST_LOG(pmapexechist,
					    "pg %p (pa %#"PRIxPADDR"): %s",
					    pg, VM_PAGE_TO_PHYS(pg),
					    "syncicached performed", 0);
					pmap_page_syncicache(pg);
					PMAP_COUNT(exec_synced_protect);
				}
			}
		}
		pt_entry = pte_prot_downgrade(pt_entry, prot);
		if (*ptep != pt_entry) {
			*ptep = pt_entry;
			/*
			 * Update the TLB if needed.
			 */
			pmap_tlb_update_addr(pmap, sva, pt_entry,
			    PMAP_TLB_NEED_IPI);
		}
	}
	return false;
}

/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist,
	    "  pmap=%p, va=%#"PRIxVADDR"..%#"PRIxVADDR" prot=%#x)",
	    pmap, sva, eva, prot);
	PMAP_COUNT(protect);

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
		return;
	}

#ifdef PARANOIADIAG
	if (sva < pmap->pm_minaddr || eva > pmap->pm_maxaddr)
		panic("%s: va range %#"PRIxVADDR"-%#"PRIxVADDR" not in range",
		    __func__, sva, eva - 1);
	if (PMAP_IS_ACTIVE(pmap)) {
		struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu());
		uint32_t asid = tlb_get_asid();
		if (asid != pai->pai_asid) {
			panic("%s: inconsistency for active TLB update"
			    ": %d <-> %d", __func__, asid, pai->pai_asid);
		}
	}
#endif

	/*
	 * Change protection on every valid mapping within this segment.
	 */
	kpreempt_disable();
	pmap_pte_process(pmap, sva, eva, pmap_pte_protect, prot);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}

#if defined(__PMAP_VIRTUAL_CACHE_ALIASES)
/*
 *	pmap_page_cache:
 *
 *	Change all mappings of a managed page to cached/uncached.
 */
static void
pmap_page_cache(struct vm_page *pg, bool cached)
{
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR") cached=%s)",
	    pg, VM_PAGE_TO_PHYS(pg), cached ? "true" : "false", 0);
	KASSERT(kpreempt_disabled());

	if (cached) {
		pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
		PMAP_COUNT(page_cache_restorations);
	} else {
		pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED);
		PMAP_COUNT(page_cache_evictions);
	}

	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
	KASSERT(kpreempt_disabled());
	for (pv_entry_t pv = &mdpg->mdpg_first;
	     pv != NULL;
	     pv = pv->pv_next) {
		pmap_t pmap = pv->pv_pmap;
		vaddr_t va = pv->pv_va;

		KASSERT(pmap != NULL);
		KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		if (ptep == NULL)
			continue;
		pt_entry_t pt_entry = *ptep;
		if (pte_valid_p(pt_entry)) {
			pt_entry = pte_cached_change(pt_entry, cached);
			*ptep = pt_entry;
			pmap_tlb_update_addr(pmap, va, pt_entry,
			    PMAP_TLB_NEED_IPI);
		}
	}
	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
}
#endif	/* __PMAP_VIRTUAL_CACHE_ALIASES */

/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
int
pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	pt_entry_t npte;
	const bool wired = (flags & PMAP_WIRED) != 0;
	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
#ifdef UVMHIST
	struct kern_history * const histp =
	    ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist);
#endif

	UVMHIST_FUNC(__func__);
#define VM_PROT_STRING(prot) \
	&"\0    (R)\0  (W)\0  (RW)\0 (X)\0  (RX)\0 (WX)\0 (RWX)\0"[UVM_PROTECTION(prot)*6]
	UVMHIST_CALLED(*histp);
	UVMHIST_LOG(*histp, "(pmap=%p, va=%#"PRIxVADDR", pa=%#"PRIxPADDR,
	    pmap, va, pa, 0);
	UVMHIST_LOG(*histp, "prot=%#x%s flags=%#x%s)",
	    prot, VM_PROT_STRING(prot), flags, VM_PROT_STRING(flags));

	const bool good_color = PMAP_PAGE_COLOROK_P(pa, va);
	if (is_kernel_pmap_p) {
		PMAP_COUNT(kernel_mappings);
		if (!good_color)
			PMAP_COUNT(kernel_mappings_bad);
	} else {
		PMAP_COUNT(user_mappings);
		if (!good_color)
			PMAP_COUNT(user_mappings_bad);
	}
#if defined(DEBUG) || defined(DIAGNOSTIC) || defined(PARANOIADIAG)
	if (va < pmap->pm_minaddr || va >= pmap->pm_maxaddr)
		panic("%s: %s %#"PRIxVADDR" too big",
		    __func__, is_kernel_pmap_p ? "kva" : "uva", va);
#endif

	KASSERTMSG(prot & VM_PROT_READ,
	    "%s: no READ (%#x) in prot %#x", __func__, VM_PROT_READ, prot);

	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
	struct vm_page_md *mdpg;

	if (pg) {
		mdpg = VM_PAGE_TO_MD(pg);
		/* Set page referenced/modified status based on flags */
		if (flags & VM_PROT_WRITE)
			pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
		else if (flags & VM_PROT_ALL)
			pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);

#ifdef __PMAP_VIRTUAL_CACHE_ALIASES
		if (!VM_PAGEMD_CACHED(pg))
			flags |= PMAP_NOCACHE;
#endif

		PMAP_COUNT(managed_mappings);
	} else {
		/*
		 * Assumption: if it is not part of our managed memory
		 * then it must be device memory which may be volatile.
		 */
		mdpg = NULL;
		flags |= PMAP_NOCACHE;
		PMAP_COUNT(unmanaged_mappings);
	}

	npte = pte_make_enter(pa, mdpg, prot, flags, is_kernel_pmap_p);

	kpreempt_disable();
	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags);
	if (__predict_false(ptep == NULL)) {
		kpreempt_enable();
		UVMHIST_LOG(*histp, "<- ENOMEM", 0,0,0,0);
		return ENOMEM;
	}
	pt_entry_t opte = *ptep;

	/* Done after case that may sleep/return. */
	if (pg)
		pmap_enter_pv(pmap, va, pg, &npte);

	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * MIPS pages in a MACH page.
	 */
	if (wired) {
		pmap->pm_stats.wired_count++;
		npte = pte_wire_entry(npte);
	}

	UVMHIST_LOG(*histp, "new pte %#x (pa %#"PRIxPADDR")", npte, pa, 0,0);

	if (pte_valid_p(opte) && pte_to_paddr(opte) != pa) {
		pmap_remove(pmap, va, va + NBPG);
		PMAP_COUNT(user_mappings_changed);
	}

	KASSERT(pte_valid_p(npte));
	bool resident = pte_valid_p(opte);
	if (!resident)
		pmap->pm_stats.resident_count++;
	*ptep = npte;

	pmap_tlb_update_addr(pmap, va, npte,
	    ((flags & VM_PROT_ALL) ? PMAP_TLB_INSERT : 0)
	    | (resident ? PMAP_TLB_NEED_IPI : 0));
	kpreempt_enable();

	if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) {
		KASSERT(mdpg != NULL);
		PMAP_COUNT(exec_mappings);
		if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) {
			if (!pte_deferred_exec_p(npte)) {
				UVMHIST_LOG(*histp,
				    "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
				    va, pg, "immediate", "");
				pmap_page_syncicache(pg);
				pmap_page_set_attributes(mdpg,
				    VM_PAGEMD_EXECPAGE);
				PMAP_COUNT(exec_synced_mappings);
			} else {
				UVMHIST_LOG(*histp, "va=%#"PRIxVADDR
				    " pg %p: %s syncicache: pte %#x",
				    va, pg, "defer", npte);
			}
		} else {
			UVMHIST_LOG(*histp,
			    "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
			    va, pg, "no",
			    (pte_cached_p(npte)
				? " (already exec)"
				: " (uncached)"));
		}
	} else if (pg != NULL && (prot & VM_PROT_EXECUTE)) {
		KASSERT(mdpg != NULL);
		KASSERT(prot & VM_PROT_WRITE);
		PMAP_COUNT(exec_mappings);
		pmap_page_syncicache(pg);
		pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
		UVMHIST_LOG(pmapexechist,
		    "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
		    va, pg, "immediate", " (writeable)");
	}

	if (prot & VM_PROT_EXECUTE) {
		UVMHIST_LOG(pmapexechist, "<- 0 (OK)", 0,0,0,0);
	} else {
		UVMHIST_LOG(pmaphist, "<- 0 (OK)", 0,0,0,0);
	}
	return 0;
}
   1099  1.1  christos 
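                        /*
                         * pmap_kenter_pa:
                         *
                         *	Enter an unmanaged mapping of pa at va into the kernel
                         *	pmap.  The mapping is not tracked on the page's pv list,
                         *	so it must be removed with pmap_kremove().  Pages that
                         *	have no vm_page (unmanaged memory) are always entered
                         *	uncached.
                         */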
   1100  1.1  christos void
   1101  1.1  christos pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1102  1.1  christos {
   1103  1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1104  1.1  christos 	struct vm_page_md *mdpg;
   1105  1.1  christos 
   1106  1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1107  1.1  christos 	UVMHIST_LOG(pmaphist, "(va=%#"PRIxVADDR" pa=%#"PRIxPADDR
   1108  1.1  christos 	    ", prot=%#x, flags=%#x)", va, pa, prot, flags);
   1109  1.1  christos 	PMAP_COUNT(kenter_pa);
   1110  1.1  christos 
   1111  1.1  christos 	if (pg == NULL) {
   1112  1.1  christos 		mdpg = NULL;
   1113  1.1  christos 		PMAP_COUNT(kenter_pa_unmanaged);
   1114  1.1  christos 		flags |= PMAP_NOCACHE;
   1115  1.1  christos 	} else {
   1116  1.1  christos 		mdpg = VM_PAGE_TO_MD(pg);
   1117  1.1  christos 	}
   1118  1.1  christos 
   1119  1.1  christos 	if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va))
   1120  1.1  christos 		PMAP_COUNT(kenter_pa_bad);
   1121  1.1  christos 
   1122  1.1  christos 	const pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags);
   1123  1.1  christos 	kpreempt_disable();
   1124  1.1  christos 	pt_entry_t * const ptep = pmap_pte_reserve(pmap_kernel(), va, 0);
   1125  1.1  christos 	KASSERT(ptep != NULL);
   1126  1.1  christos 	KASSERT(!pte_valid_p(*ptep));
   1127  1.1  christos 	*ptep = npte;
   1128  1.1  christos 	/*
   1129  1.1  christos 	 * We have the option to force this mapping into the TLB but we
   1130  1.1  christos 	 * don't.  Instead let the next reference to the page do it.
   1131  1.1  christos 	 */
   1132  1.1  christos 	pmap_tlb_update_addr(pmap_kernel(), va, npte, 0);
   1133  1.1  christos 	kpreempt_enable();
   1134  1.1  christos #if DEBUG > 1
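                        	/*
                        	 * Sanity check: this appears to assume that pa is itself
                        	 * dereferenceable (a direct-mapped address) and that the
                        	 * new mapping at va is immediately usable.
                        	 */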
   1135  1.1  christos 	for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) {
   1136  1.1  christos 		if (((long *)va)[i] != ((long *)pa)[i])
   1137  1.1  christos 			panic("%s: contents (%lx) of va %#"PRIxVADDR
   1138  1.1  christos 			    " != contents (%lx) of pa %#"PRIxPADDR, __func__,
   1139  1.1  christos 			    ((long *)va)[i], va, ((long *)pa)[i], pa);
   1140  1.1  christos 	}
   1141  1.1  christos #endif
   1142  1.1  christos 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
   1143  1.1  christos }
   1144  1.1  christos 
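                        /*
                         * pmap_pte_kremove:
                         *
                         *	Callback invoked via pmap_pte_process() for pmap_kremove():
                         *	invalidate every valid PTE in [sva, eva), cleaning the cache
                         *	first for managed pages and flushing the TLB entry for each
                         *	address.  Always returns false so the walk continues.
                         */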
   1145  1.1  christos static bool
   1146  1.1  christos pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
   1147  1.1  christos 	uintptr_t flags)
   1148  1.1  christos {
   1149  1.1  christos 	const pt_entry_t new_pt_entry = pte_nv_entry(true);
   1150  1.1  christos 
   1151  1.1  christos 	KASSERT(kpreempt_disabled());
   1152  1.1  christos 
   1153  1.1  christos 	/*
    1154  1.1  christos 	 * Invalidate every valid mapping within this segment.
   1155  1.1  christos 	 */
   1156  1.1  christos 	for (; sva < eva; sva += NBPG, ptep++) {
   1157  1.1  christos 		pt_entry_t pt_entry = *ptep;
   1158  1.1  christos 		if (!pte_valid_p(pt_entry)) {
   1159  1.1  christos 			continue;
   1160  1.1  christos 		}
   1161  1.1  christos 
   1162  1.1  christos 		PMAP_COUNT(kremove_pages);
   1163  1.1  christos 		struct vm_page * const pg =
   1164  1.1  christos 		    PHYS_TO_VM_PAGE(pte_to_paddr(pt_entry));
   1165  1.1  christos 		if (pg != NULL)
   1166  1.1  christos 			pmap_md_vca_clean(pg, sva, PMAP_WBINV);
   1167  1.1  christos 
   1168  1.1  christos 		*ptep = new_pt_entry;
   1169  1.1  christos 		pmap_tlb_invalidate_addr(pmap_kernel(), sva);
   1170  1.1  christos 	}
   1171  1.1  christos 
   1172  1.1  christos 	return false;
   1173  1.1  christos }
   1174  1.1  christos 
   1175  1.1  christos void
   1176  1.1  christos pmap_kremove(vaddr_t va, vsize_t len)
   1177  1.1  christos {
   1178  1.1  christos 	const vaddr_t sva = trunc_page(va);
   1179  1.1  christos 	const vaddr_t eva = round_page(va + len);
   1180  1.1  christos 
   1181  1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1182  1.1  christos 	UVMHIST_LOG(pmaphist, "(va=%#"PRIxVADDR" len=%#"PRIxVSIZE")",
   1183  1.1  christos 	    va, len, 0,0);
   1184  1.1  christos 
   1185  1.1  christos 	kpreempt_disable();
   1186  1.1  christos 	pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0);
   1187  1.1  christos 	kpreempt_enable();
   1188  1.1  christos 
   1189  1.1  christos 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
   1190  1.1  christos }
   1191  1.1  christos 
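                        /*
                         *	Give the pmap a hint that all of its mappings are about to
                         *	be removed.  Instead of invalidating each address
                         *	individually, deactivate and release all of the pmap's
                         *	ASIDs and defer reactivation until the pmap is next used.
                         *	Only user pmaps may be torn down this way.
                         */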
   1192  1.1  christos void
   1193  1.1  christos pmap_remove_all(struct pmap *pmap)
   1194  1.1  christos {
   1195  1.1  christos 	KASSERT(pmap != pmap_kernel());
   1196  1.1  christos 
   1197  1.1  christos 	kpreempt_disable();
    1198  1.1  christos 	/*
    1199  1.1  christos 	 * Free all of our ASIDs, which means we can skip doing all of
    1200  1.1  christos 	 * the per-address tlb_invalidate_addrs() calls.
    1201  1.1  christos 	 */
   1202  1.1  christos 	pmap_tlb_asid_deactivate(pmap);
   1203  1.1  christos 	pmap_tlb_asid_release_all(pmap);
   1204  1.1  christos 	pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE;
   1205  1.1  christos 
   1206  1.1  christos 	kpreempt_enable();
   1207  1.1  christos }
   1208  1.1  christos 
   1209  1.1  christos /*
   1210  1.1  christos  *	Routine:	pmap_unwire
   1211  1.1  christos  *	Function:	Clear the wired attribute for a map/virtual-address
   1212  1.1  christos  *			pair.
   1213  1.1  christos  *	In/out conditions:
   1214  1.1  christos  *			The mapping must already exist in the pmap.
   1215  1.1  christos  */
   1216  1.1  christos void
   1217  1.1  christos pmap_unwire(pmap_t pmap, vaddr_t va)
   1218  1.1  christos {
   1219  1.1  christos 
   1220  1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1221  1.1  christos 	UVMHIST_LOG(pmaphist, "(pmap=%p va=%#"PRIxVADDR")", pmap, va, 0,0);
   1222  1.1  christos 	PMAP_COUNT(unwire);
   1223  1.1  christos 
   1224  1.1  christos 	/*
   1225  1.1  christos 	 * Don't need to flush the TLB since PG_WIRED is only in software.
   1226  1.1  christos 	 */
   1227  1.1  christos #ifdef PARANOIADIAG
   1228  1.1  christos 	if (va < pmap->pm_minaddr || pmap->pm_maxaddr <= va)
   1229  1.1  christos 		panic("pmap_unwire");
   1230  1.1  christos #endif
   1231  1.1  christos 	kpreempt_disable();
    1232  1.1  christos 	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
    1233  1.1  christos #ifdef DIAGNOSTIC
    1234  1.1  christos 	if (ptep == NULL)
    1235  1.1  christos 		panic("%s: pmap %p va %#"PRIxVADDR" invalid STE",
    1236  1.1  christos 		    __func__, pmap, va);
    1237  1.1  christos #endif
    1238  1.1  christos 	pt_entry_t pt_entry = *ptep;
   1239  1.1  christos 
   1240  1.1  christos #ifdef DIAGNOSTIC
   1241  1.1  christos 	if (!pte_valid_p(pt_entry))
   1242  1.1  christos 		panic("pmap_unwire: pmap %p va %#"PRIxVADDR" invalid PTE",
   1243  1.1  christos 		    pmap, va);
   1244  1.1  christos #endif
   1245  1.1  christos 
   1246  1.1  christos 	if (pte_wired_p(pt_entry)) {
   1247  1.1  christos 		*ptep = pte_unwire_entry(*ptep);
   1248  1.1  christos 		pmap->pm_stats.wired_count--;
   1249  1.1  christos 	}
   1250  1.1  christos #ifdef DIAGNOSTIC
   1251  1.1  christos 	else {
   1252  1.1  christos 		printf("%s: wiring for pmap %p va %#"PRIxVADDR" unchanged!\n",
   1253  1.1  christos 		    __func__, pmap, va);
   1254  1.1  christos 	}
   1255  1.1  christos #endif
   1256  1.1  christos 	kpreempt_enable();
   1257  1.1  christos }
   1258  1.1  christos 
   1259  1.1  christos /*
   1260  1.1  christos  *	Routine:	pmap_extract
   1261  1.1  christos  *	Function:
   1262  1.1  christos  *		Extract the physical page address associated
   1263  1.1  christos  *		with the given map/virtual_address pair.
   1264  1.1  christos  */
   1265  1.1  christos bool
   1266  1.1  christos pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
   1267  1.1  christos {
   1268  1.1  christos 	paddr_t pa;
   1269  1.1  christos 
   1270  1.1  christos 	//UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1271  1.1  christos 	//UVMHIST_LOG(pmaphist, "(pmap=%p va=%#"PRIxVADDR")", pmap, va, 0,0);
   1272  1.1  christos 	if (pmap == pmap_kernel()) {
   1273  1.1  christos 		if (pmap_md_direct_mapped_vaddr_p(va)) {
   1274  1.1  christos 			pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   1275  1.1  christos 			goto done;
   1276  1.1  christos 		}
   1277  1.1  christos 		if (pmap_md_io_vaddr_p(va))
   1278  1.1  christos 			panic("pmap_extract: io address %#"PRIxVADDR"", va);
   1279  1.1  christos 	}
   1280  1.1  christos 	kpreempt_disable();
   1281  1.1  christos 	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1282  1.1  christos 	if (ptep == NULL) {
   1283  1.1  christos 		//UVMHIST_LOG(pmaphist, "<- false (not in segmap)", 0,0,0,0);
   1284  1.1  christos 		kpreempt_enable();
   1285  1.1  christos 		return false;
   1286  1.1  christos 	}
   1287  1.1  christos 	if (!pte_valid_p(*ptep)) {
   1288  1.1  christos 		//UVMHIST_LOG(pmaphist, "<- false (PTE not valid)", 0,0,0,0);
   1289  1.1  christos 		kpreempt_enable();
   1290  1.1  christos 		return false;
   1291  1.1  christos 	}
   1292  1.1  christos 	pa = pte_to_paddr(*ptep) | (va & PGOFSET);
   1293  1.1  christos 	kpreempt_enable();
   1294  1.1  christos done:
   1295  1.1  christos 	if (pap != NULL) {
   1296  1.1  christos 		*pap = pa;
   1297  1.1  christos 	}
   1298  1.1  christos 	//UVMHIST_LOG(pmaphist, "<- true (pa %#"PRIxPADDR")", pa, 0,0,0);
   1299  1.1  christos 	return true;
   1300  1.1  christos }
   1301  1.1  christos 
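                        /*
                         * Minimal usage sketch (hypothetical caller, not part of this
                         * file): the result must be checked, since va may simply not be
                         * mapped:
                         *
                         *	paddr_t pa;
                         *
                         *	if (!pmap_extract(pmap_kernel(), va, &pa))
                         *		return EFAULT;
                         */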
   1302  1.1  christos /*
   1303  1.1  christos  *	Copy the range specified by src_addr/len
   1304  1.1  christos  *	from the source map to the range dst_addr/len
   1305  1.1  christos  *	in the destination map.
   1306  1.1  christos  *
   1307  1.1  christos  *	This routine is only advisory and need not do anything.
   1308  1.1  christos  */
   1309  1.1  christos void
   1310  1.1  christos pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
   1311  1.1  christos     vaddr_t src_addr)
   1312  1.1  christos {
   1313  1.1  christos 
   1314  1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1315  1.1  christos 	PMAP_COUNT(copy);
   1316  1.1  christos }
   1317  1.1  christos 
   1318  1.1  christos /*
   1319  1.1  christos  *	pmap_clear_reference:
   1320  1.1  christos  *
   1321  1.1  christos  *	Clear the reference bit on the specified physical page.
   1322  1.1  christos  */
   1323  1.1  christos bool
   1324  1.1  christos pmap_clear_reference(struct vm_page *pg)
   1325  1.1  christos {
   1326  1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1327  1.1  christos 
   1328  1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1329  1.1  christos 	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR"))",
   1330  1.1  christos 	   pg, VM_PAGE_TO_PHYS(pg), 0,0);
   1331  1.1  christos 
   1332  1.1  christos 	bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);
   1333  1.1  christos 
   1334  1.1  christos 	UVMHIST_LOG(pmaphist, "<- %s", rv ? "true" : "false", 0,0,0);
   1335  1.1  christos 
   1336  1.1  christos 	return rv;
   1337  1.1  christos }
   1338  1.1  christos 
   1339  1.1  christos /*
   1340  1.1  christos  *	pmap_is_referenced:
   1341  1.1  christos  *
   1342  1.1  christos  *	Return whether or not the specified physical page is referenced
   1343  1.1  christos  *	by any physical maps.
   1344  1.1  christos  */
   1345  1.1  christos bool
   1346  1.1  christos pmap_is_referenced(struct vm_page *pg)
   1347  1.1  christos {
   1348  1.1  christos 
   1349  1.1  christos 	return VM_PAGEMD_REFERENCED_P(VM_PAGE_TO_MD(pg));
   1350  1.1  christos }
   1351  1.1  christos 
   1352  1.1  christos /*
   1353  1.1  christos  *	Clear the modify bits on the specified physical page.
   1354  1.1  christos  */
   1355  1.1  christos bool
   1356  1.1  christos pmap_clear_modify(struct vm_page *pg)
   1357  1.1  christos {
   1358  1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1359  1.1  christos 	pv_entry_t pv = &mdpg->mdpg_first;
   1360  1.1  christos 	pv_entry_t pv_next;
   1361  1.1  christos 	uint16_t gen;
   1362  1.1  christos 
   1363  1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1364  1.1  christos 	UVMHIST_LOG(pmaphist, "(pg=%p (%#"PRIxPADDR"))",
   1365  1.1  christos 	    pg, VM_PAGE_TO_PHYS(pg), 0,0);
   1366  1.1  christos 	PMAP_COUNT(clear_modify);
   1367  1.1  christos 
   1368  1.1  christos 	if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
   1369  1.1  christos 		if (pv->pv_pmap == NULL) {
   1370  1.1  christos 			UVMHIST_LOG(pmapexechist,
   1371  1.1  christos 			    "pg %p (pa %#"PRIxPADDR"): %s",
   1372  1.1  christos 			    pg, VM_PAGE_TO_PHYS(pg), "execpage cleared", 0);
   1373  1.1  christos 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1374  1.1  christos 			PMAP_COUNT(exec_uncached_clear_modify);
   1375  1.1  christos 		} else {
   1376  1.1  christos 			UVMHIST_LOG(pmapexechist,
   1377  1.1  christos 			    "pg %p (pa %#"PRIxPADDR"): %s",
   1378  1.1  christos 			    pg, VM_PAGE_TO_PHYS(pg), "syncicache performed", 0);
   1379  1.1  christos 			pmap_page_syncicache(pg);
   1380  1.1  christos 			PMAP_COUNT(exec_synced_clear_modify);
   1381  1.1  christos 		}
   1382  1.1  christos 	}
   1383  1.1  christos 	if (!pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED)) {
   1384  1.1  christos 		UVMHIST_LOG(pmaphist, "<- false", 0,0,0,0);
   1385  1.1  christos 		return false;
   1386  1.1  christos 	}
   1387  1.1  christos 	if (pv->pv_pmap == NULL) {
   1388  1.1  christos 		UVMHIST_LOG(pmaphist, "<- true (no mappings)", 0,0,0,0);
   1389  1.1  christos 		return true;
   1390  1.1  christos 	}
   1391  1.1  christos 
   1392  1.1  christos 	/*
   1393  1.1  christos 	 * remove write access from any pages that are dirty
   1394  1.1  christos 	 * so we can tell if they are written to again later.
   1395  1.1  christos 	 * flush the VAC first if there is one.
   1396  1.1  christos 	 */
   1397  1.1  christos 	kpreempt_disable();
   1398  1.1  christos 	gen = VM_PAGEMD_PVLIST_LOCK(mdpg, false);
   1399  1.1  christos 	for (; pv != NULL; pv = pv_next) {
   1400  1.1  christos 		pmap_t pmap = pv->pv_pmap;
   1401  1.1  christos 		vaddr_t va = pv->pv_va;
   1402  1.1  christos 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
   1403  1.1  christos 		KASSERT(ptep);
   1404  1.1  christos 		pv_next = pv->pv_next;
   1405  1.1  christos 		pt_entry_t pt_entry = pte_prot_nowrite(*ptep);
   1406  1.1  christos 		if (*ptep == pt_entry) {
   1407  1.1  christos 			continue;
   1408  1.1  christos 		}
   1409  1.1  christos 		pmap_md_vca_clean(pg, va, PMAP_WBINV);
   1410  1.1  christos 		*ptep = pt_entry;
   1411  1.1  christos 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1412  1.1  christos 		pmap_tlb_invalidate_addr(pmap, va);
   1413  1.1  christos 		pmap_update(pmap);
   1414  1.1  christos 		if (__predict_false(gen != VM_PAGEMD_PVLIST_LOCK(mdpg, false))) {
   1415  1.1  christos 			/*
   1416  1.1  christos 			 * The list changed!  So restart from the beginning.
   1417  1.1  christos 			 */
   1418  1.1  christos 			pv_next = &mdpg->mdpg_first;
   1419  1.1  christos 		}
   1420  1.1  christos 	}
   1421  1.1  christos 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1422  1.1  christos 	kpreempt_enable();
   1423  1.1  christos 
   1424  1.1  christos 	UVMHIST_LOG(pmaphist, "<- true (mappings changed)", 0,0,0,0);
   1425  1.1  christos 	return true;
   1426  1.1  christos }
   1427  1.1  christos 
   1428  1.1  christos /*
   1429  1.1  christos  *	pmap_is_modified:
   1430  1.1  christos  *
   1431  1.1  christos  *	Return whether or not the specified physical page is modified
   1432  1.1  christos  *	by any physical maps.
   1433  1.1  christos  */
   1434  1.1  christos bool
   1435  1.1  christos pmap_is_modified(struct vm_page *pg)
   1436  1.1  christos {
   1437  1.1  christos 
   1438  1.1  christos 	return VM_PAGEMD_MODIFIED_P(VM_PAGE_TO_MD(pg));
   1439  1.1  christos }
   1440  1.1  christos 
   1441  1.1  christos /*
   1442  1.1  christos  *	pmap_set_modified:
   1443  1.1  christos  *
    1444  1.1  christos  *	Set the modified and referenced attributes on the specified page.
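                         *	The physical address must be that of a managed page: the
                         *	vm_page looked up below is used without a NULL check.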
   1445  1.1  christos  */
   1446  1.1  christos void
   1447  1.1  christos pmap_set_modified(paddr_t pa)
   1448  1.1  christos {
   1449  1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1450  1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1451  1.1  christos 	pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
   1452  1.1  christos }
   1453  1.1  christos 
   1454  1.1  christos /******************** pv_entry management ********************/
   1455  1.1  christos 
   1456  1.1  christos static void
   1457  1.1  christos pmap_check_pvlist(struct vm_page *pg)
   1458  1.1  christos {
   1459  1.1  christos #ifdef PARANOIADIAG
   1460  1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
    1461  1.1  christos 	pv_entry_t pv = &mdpg->mdpg_first;
   1462  1.1  christos 	if (pv->pv_pmap != NULL) {
   1463  1.1  christos 		for (; pv != NULL; pv = pv->pv_next) {
   1464  1.1  christos 			KASSERT(!pmap_md_direct_mapped_vaddr_p(pv->pv_va));
   1465  1.1  christos 		}
   1466  1.1  christos 	}
   1467  1.1  christos #endif /* PARANOIADIAG */
   1468  1.1  christos }
   1469  1.1  christos 
   1470  1.1  christos /*
   1471  1.1  christos  * Enter the pmap and virtual address into the
   1472  1.1  christos  * physical to virtual map table.
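                         *
                         * Must be called with kernel preemption disabled.  The pv list
                         * lock may be dropped to allocate a new pv_entry; the list's
                         * generation number is then used to detect concurrent changes
                         * and restart.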
   1473  1.1  christos  */
   1474  1.1  christos void
   1475  1.1  christos pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, u_int *npte)
   1476  1.1  christos {
   1477  1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1478  1.1  christos 	pv_entry_t pv, npv, apv;
    1479  1.1  christos 	uint16_t gen;
   1480  1.1  christos 	bool first = false;
   1481  1.1  christos 
   1482  1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1483  1.1  christos 	UVMHIST_LOG(pmaphist,
   1484  1.1  christos 	    "(pmap=%p va=%#"PRIxVADDR" pg=%p (%#"PRIxPADDR")",
   1485  1.1  christos 	    pmap, va, pg, VM_PAGE_TO_PHYS(pg));
   1486  1.1  christos 	UVMHIST_LOG(pmaphist, "nptep=%p (%#x))", npte, *npte, 0, 0);
   1487  1.1  christos 
   1488  1.1  christos 	KASSERT(kpreempt_disabled());
   1489  1.1  christos 	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
   1490  1.1  christos 
   1491  1.1  christos 	apv = NULL;
   1492  1.1  christos 	pv = &mdpg->mdpg_first;
   1493  1.1  christos 	gen = VM_PAGEMD_PVLIST_LOCK(mdpg, true);
   1494  1.1  christos 	pmap_check_pvlist(pg);
   1495  1.1  christos again:
   1496  1.1  christos 	if (pv->pv_pmap == NULL) {
   1497  1.1  christos 		KASSERT(pv->pv_next == NULL);
   1498  1.1  christos 		/*
   1499  1.1  christos 		 * No entries yet, use header as the first entry
   1500  1.1  christos 		 */
   1501  1.1  christos 		PMAP_COUNT(primary_mappings);
   1502  1.1  christos 		PMAP_COUNT(mappings);
   1503  1.1  christos 		first = true;
   1504  1.1  christos #ifdef __PMAP_VIRTUAL_CACHE_ALIASES
    1505  1.1  christos 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
   1506  1.1  christos #endif
   1507  1.1  christos 		pv->pv_pmap = pmap;
   1508  1.1  christos 		pv->pv_va = va;
   1509  1.1  christos 	} else {
   1510  1.1  christos 		if (pmap_md_vca_add(pg, va, npte))
   1511  1.1  christos 			goto again;
   1512  1.1  christos 
   1513  1.1  christos 		/*
   1514  1.1  christos 		 * There is at least one other VA mapping this page.
   1515  1.1  christos 		 * Place this entry after the header.
   1516  1.1  christos 		 *
   1517  1.1  christos 		 * Note: the entry may already be in the table if
   1518  1.1  christos 		 * we are only changing the protection bits.
   1519  1.1  christos 		 */
   1520  1.1  christos 
   1521  1.1  christos #ifdef PARANOIADIAG
   1522  1.1  christos 		const paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1523  1.1  christos #endif
   1524  1.1  christos 		for (npv = pv; npv; npv = npv->pv_next) {
   1525  1.1  christos 			if (pmap == npv->pv_pmap && va == npv->pv_va) {
   1526  1.1  christos #ifdef PARANOIADIAG
   1527  1.1  christos 				pt_entry_t *ptep = pmap_pte_lookup(pmap, va);
   1528  1.1  christos 				pt_entry_t pt_entry = (ptep ? *ptep : 0);
   1529  1.1  christos 				if (!pte_valid_p(pt_entry)
   1530  1.1  christos 				    || pte_to_paddr(pt_entry) != pa)
   1531  1.1  christos 					printf(
   1532  1.1  christos 		"pmap_enter_pv: found va %#"PRIxVADDR" pa %#"PRIxPADDR" in pv_table but != %x\n",
   1533  1.1  christos 					    va, pa, pt_entry);
   1534  1.1  christos #endif
   1535  1.1  christos 				PMAP_COUNT(remappings);
   1536  1.1  christos 				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1537  1.1  christos 				if (__predict_false(apv != NULL))
   1538  1.1  christos 					pmap_pv_free(apv);
   1539  1.1  christos 				return;
   1540  1.1  christos 			}
   1541  1.1  christos 		}
   1542  1.1  christos 		if (__predict_true(apv == NULL)) {
   1543  1.1  christos 			/*
    1544  1.1  christos 			 * To allocate a PV, we have to release the PVLIST
    1545  1.1  christos 			 * lock, noting the page generation first.  We allocate
    1546  1.1  christos 			 * the PV and then reacquire the lock.
   1547  1.1  christos 			 */
   1548  1.1  christos 			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1549  1.1  christos 
   1550  1.1  christos 			apv = (pv_entry_t)pmap_pv_alloc();
   1551  1.1  christos 			if (apv == NULL)
   1552  1.1  christos 				panic("pmap_enter_pv: pmap_pv_alloc() failed");
   1553  1.1  christos 
   1554  1.1  christos 			/*
   1555  1.1  christos 			 * If the generation has changed, then someone else
   1556  1.1  christos 			 * tinkered with this page so we should
   1557  1.1  christos 			 * start over.
   1558  1.1  christos 			 */
   1559  1.1  christos 			uint16_t oldgen = gen;
   1560  1.1  christos 			gen = VM_PAGEMD_PVLIST_LOCK(mdpg, true);
   1561  1.1  christos 			if (gen != oldgen)
   1562  1.1  christos 				goto again;
   1563  1.1  christos 		}
   1564  1.1  christos 		npv = apv;
   1565  1.1  christos 		apv = NULL;
   1566  1.1  christos 		npv->pv_va = va;
   1567  1.1  christos 		npv->pv_pmap = pmap;
   1568  1.1  christos 		npv->pv_next = pv->pv_next;
   1569  1.1  christos 		pv->pv_next = npv;
   1570  1.1  christos 		PMAP_COUNT(mappings);
   1571  1.1  christos 	}
   1572  1.1  christos 	pmap_check_pvlist(pg);
   1573  1.1  christos 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1574  1.1  christos 	if (__predict_false(apv != NULL))
   1575  1.1  christos 		pmap_pv_free(apv);
   1576  1.1  christos 
   1577  1.1  christos 	UVMHIST_LOG(pmaphist, "<- done pv=%p%s",
   1578  1.1  christos 	    pv, first ? " (first pv)" : "",0,0);
   1579  1.1  christos }
   1580  1.1  christos 
   1581  1.1  christos /*
   1582  1.1  christos  * Remove a physical to virtual address translation.
   1583  1.1  christos  * If cache was inhibited on this page, and there are no more cache
   1584  1.1  christos  * conflicts, restore caching.
   1585  1.1  christos  * Flush the cache if the last page is removed (should always be cached
   1586  1.1  christos  * at this point).
   1587  1.1  christos  */
   1588  1.1  christos void
   1589  1.1  christos pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty)
   1590  1.1  christos {
   1591  1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1592  1.1  christos 	pv_entry_t pv, npv;
   1593  1.1  christos 	bool last;
   1594  1.1  christos 
   1595  1.1  christos 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
   1596  1.1  christos 	UVMHIST_LOG(pmaphist,
    1597  1.1  christos 	    "(pmap=%p va=%#"PRIxVADDR" pg=%p (pa %#"PRIxPADDR")",
   1598  1.1  christos 	    pmap, va, pg, VM_PAGE_TO_PHYS(pg));
   1599  1.1  christos 	UVMHIST_LOG(pmaphist, "dirty=%s)", dirty ? "true" : "false", 0,0,0);
   1600  1.1  christos 
   1601  1.1  christos 	KASSERT(kpreempt_disabled());
   1602  1.1  christos 	pv = &mdpg->mdpg_first;
   1603  1.1  christos 
   1604  1.1  christos 	(void)VM_PAGEMD_PVLIST_LOCK(mdpg, true);
   1605  1.1  christos 	pmap_check_pvlist(pg);
   1606  1.1  christos 
   1607  1.1  christos 	/*
   1608  1.1  christos 	 * If it is the first entry on the list, it is actually
   1609  1.1  christos 	 * in the header and we must copy the following entry up
   1610  1.1  christos 	 * to the header.  Otherwise we must search the list for
   1611  1.1  christos 	 * the entry.  In either case we free the now unused entry.
   1612  1.1  christos 	 */
   1613  1.1  christos 
   1614  1.1  christos 	last = false;
   1615  1.1  christos 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
   1616  1.1  christos 		npv = pv->pv_next;
   1617  1.1  christos 		if (npv) {
   1618  1.1  christos 			*pv = *npv;
   1619  1.1  christos 			KASSERT(pv->pv_pmap != NULL);
   1620  1.1  christos 		} else {
   1621  1.1  christos #ifdef __PMAP_VIRTUAL_CACHE_ALIASES
    1622  1.1  christos 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
   1623  1.1  christos #endif
   1624  1.1  christos 			pv->pv_pmap = NULL;
   1625  1.1  christos 			last = true;	/* Last mapping removed */
   1626  1.1  christos 		}
   1627  1.1  christos 		PMAP_COUNT(remove_pvfirst);
   1628  1.1  christos 	} else {
   1629  1.1  christos 		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
   1630  1.1  christos 			PMAP_COUNT(remove_pvsearch);
   1631  1.1  christos 			if (pmap == npv->pv_pmap && va == npv->pv_va)
   1632  1.1  christos 				break;
   1633  1.1  christos 		}
   1634  1.1  christos 		if (npv) {
   1635  1.1  christos 			pv->pv_next = npv->pv_next;
   1636  1.1  christos 		}
   1637  1.1  christos 	}
   1638  1.1  christos 	pmap_md_vca_remove(pg, va);
   1639  1.1  christos 
   1640  1.1  christos 	pmap_check_pvlist(pg);
   1641  1.1  christos 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
   1642  1.1  christos 
   1643  1.1  christos 	/*
   1644  1.1  christos 	 * Free the pv_entry if needed.
   1645  1.1  christos 	 */
   1646  1.1  christos 	if (npv)
   1647  1.1  christos 		pmap_pv_free(npv);
   1648  1.1  christos 	if (VM_PAGEMD_EXECPAGE_P(mdpg) && dirty) {
   1649  1.1  christos 		if (last) {
   1650  1.1  christos 			/*
   1651  1.1  christos 			 * If this was the page's last mapping, we no longer
   1652  1.1  christos 			 * care about its execness.
   1653  1.1  christos 			 */
   1654  1.1  christos 			UVMHIST_LOG(pmapexechist,
   1655  1.1  christos 			    "pg %p (pa %#"PRIxPADDR")%s: %s",
   1656  1.1  christos 			    pg, VM_PAGE_TO_PHYS(pg),
   1657  1.1  christos 			    last ? " [last mapping]" : "",
   1658  1.1  christos 			    "execpage cleared");
   1659  1.1  christos 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
   1660  1.1  christos 			PMAP_COUNT(exec_uncached_remove);
   1661  1.1  christos 		} else {
   1662  1.1  christos 			/*
   1663  1.1  christos 			 * Someone still has it mapped as an executable page
   1664  1.1  christos 			 * so we must sync it.
   1665  1.1  christos 			 */
   1666  1.1  christos 			UVMHIST_LOG(pmapexechist,
   1667  1.1  christos 			    "pg %p (pa %#"PRIxPADDR")%s: %s",
   1668  1.1  christos 			    pg, VM_PAGE_TO_PHYS(pg),
   1669  1.1  christos 			    last ? " [last mapping]" : "",
   1670  1.1  christos 			    "performed syncicache");
   1671  1.1  christos 			pmap_page_syncicache(pg);
   1672  1.1  christos 			PMAP_COUNT(exec_synced_remove);
   1673  1.1  christos 		}
   1674  1.1  christos 	}
   1675  1.1  christos 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
   1676  1.1  christos }
   1677  1.1  christos 
   1678  1.1  christos #if defined(MULTIPROCESSOR)
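                        /*
                         * A page worth of spin mutexes, one per cache line, used to lock
                         * the pv lists.  Pages are lazily assigned a mutex from this
                         * table the first time their pv list is locked (see
                         * pmap_pvlist_lock()).
                         */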
   1679  1.1  christos struct pmap_pvlist_info {
   1680  1.1  christos 	kmutex_t *pli_locks[PAGE_SIZE / 32];
   1681  1.1  christos 	volatile u_int pli_lock_refs[PAGE_SIZE / 32];
   1682  1.1  christos 	volatile u_int pli_lock_index;
   1683  1.1  christos 	u_int pli_lock_mask;
   1684  1.1  christos } pmap_pvlist_info;
   1685  1.1  christos 
   1686  1.1  christos void
   1687  1.1  christos pmap_pvlist_lock_init(size_t cache_line_size)
   1688  1.1  christos {
   1689  1.1  christos 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
   1690  1.1  christos 	const vaddr_t lock_page = uvm_pageboot_alloc(PAGE_SIZE);
   1691  1.1  christos 	vaddr_t lock_va = lock_page;
   1692  1.1  christos 	if (sizeof(kmutex_t) > cache_line_size) {
   1693  1.1  christos 		cache_line_size = roundup2(sizeof(kmutex_t), cache_line_size);
   1694  1.1  christos 	}
   1695  1.1  christos 	const size_t nlocks = PAGE_SIZE / cache_line_size;
   1696  1.1  christos 	KASSERT((nlocks & (nlocks - 1)) == 0);
   1697  1.1  christos 	/*
   1698  1.1  christos 	 * Now divide the page into a number of mutexes, one per cacheline.
   1699  1.1  christos 	 */
   1700  1.1  christos 	for (size_t i = 0; i < nlocks; lock_va += cache_line_size, i++) {
   1701  1.1  christos 		kmutex_t * const lock = (kmutex_t *)lock_va;
   1702  1.1  christos 		mutex_init(lock, MUTEX_DEFAULT, IPL_VM);
   1703  1.1  christos 		pli->pli_locks[i] = lock;
   1704  1.1  christos 	}
   1705  1.1  christos 	pli->pli_lock_mask = nlocks - 1;
   1706  1.1  christos }
   1707  1.1  christos 
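                        /*
                         * pmap_pvlist_lock:
                         *
                         *	Lock the pv list of the page described by mdpg, assigning
                         *	the page one of the pre-initialized mutexes on first use.
                         *	The stride of 37 applied to pli_lock_index spreads pages
                         *	across the lock table independently of page color.  Returns
                         *	the pv list generation number sampled at lock time; the
                         *	generation is incremented when the caller intends to change
                         *	the list.
                         */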
   1708  1.1  christos uint16_t
   1709  1.1  christos pmap_pvlist_lock(struct vm_page_md *mdpg, bool list_change)
   1710  1.1  christos {
   1711  1.1  christos 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
   1712  1.1  christos 	kmutex_t *lock = mdpg->mdpg_lock;
    1713  1.1  christos 	uint16_t gen;
   1714  1.1  christos 
   1715  1.1  christos 	/*
   1716  1.1  christos 	 * Allocate a lock on an as-needed basis.  This will hopefully give us
   1717  1.1  christos 	 * semi-random distribution not based on page color.
   1718  1.1  christos 	 */
   1719  1.1  christos 	if (__predict_false(lock == NULL)) {
   1720  1.1  christos 		size_t locknum = atomic_add_int_nv(&pli->pli_lock_index, 37);
   1721  1.1  christos 		size_t lockid = locknum & pli->pli_lock_mask;
   1722  1.1  christos 		kmutex_t * const new_lock = pli->pli_locks[lockid];
   1723  1.1  christos 		/*
   1724  1.1  christos 		 * Set the lock.  If some other thread already did, just use
   1725  1.1  christos 		 * the one they assigned.
   1726  1.1  christos 		 */
   1727  1.1  christos 		lock = atomic_cas_ptr(&mdpg->mdpg_lock, NULL, new_lock);
   1728  1.1  christos 		if (lock == NULL) {
   1729  1.1  christos 			lock = new_lock;
   1730  1.1  christos 			atomic_inc_uint(&pli->pli_lock_refs[lockid]);
   1731  1.1  christos 		}
   1732  1.1  christos 	}
   1733  1.1  christos 
   1734  1.1  christos 	/*
   1735  1.1  christos 	 * Now finally lock the pvlists.
   1736  1.1  christos 	 */
   1737  1.1  christos 	mutex_spin_enter(lock);
   1738  1.1  christos 
   1739  1.1  christos 	/*
   1740  1.1  christos 	 * If the locker will be changing the list, increment the high 16 bits
   1741  1.1  christos 	 * of attrs so we use that as a generation number.
   1742  1.1  christos 	 */
   1743  1.1  christos 	gen = VM_PAGEMD_PVLIST_GEN(mdpg);		/* get old value */
   1744  1.1  christos 	if (list_change)
   1745  1.1  christos 		atomic_add_int(&mdpg->mdpg_attrs, 0x10000);
   1746  1.1  christos 
   1747  1.1  christos 	/*
   1748  1.1  christos 	 * Return the generation number.
   1749  1.1  christos 	 */
   1750  1.1  christos 	return gen;
   1751  1.1  christos }
   1752  1.1  christos #else /* !MULTIPROCESSOR */
   1753  1.1  christos void
   1754  1.1  christos pmap_pvlist_lock_init(size_t cache_line_size)
   1755  1.1  christos {
   1756  1.1  christos 	mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_VM);
   1757  1.1  christos }
   1758  1.1  christos 
   1759  1.1  christos #ifdef MODULAR
   1760  1.1  christos uint16_t
   1761  1.1  christos pmap_pvlist_lock(struct vm_page_md *mdpg, bool list_change)
   1762  1.1  christos {
   1763  1.1  christos 	/*
   1764  1.1  christos 	 * We just use a global lock.
   1765  1.1  christos 	 */
   1766  1.1  christos 	if (__predict_false(mdpg->mdpg_lock == NULL)) {
   1767  1.1  christos 		mdpg->mdpg_lock = &pmap_pvlist_mutex;
   1768  1.1  christos 	}
   1769  1.1  christos 
   1770  1.1  christos 	/*
   1771  1.1  christos 	 * Now finally lock the pvlists.
   1772  1.1  christos 	 */
   1773  1.1  christos 	mutex_spin_enter(mdpg->mdpg_lock);
   1774  1.1  christos 
   1775  1.1  christos 	return 0;
   1776  1.1  christos }
   1777  1.1  christos #endif /* MODULAR */
   1778  1.1  christos #endif /* !MULTIPROCESSOR */
   1779  1.1  christos 
   1780  1.1  christos /*
   1781  1.1  christos  * pmap_pv_page_alloc:
   1782  1.1  christos  *
   1783  1.1  christos  *	Allocate a page for the pv_entry pool.
   1784  1.1  christos  */
   1785  1.1  christos void *
   1786  1.1  christos pmap_pv_page_alloc(struct pool *pp, int flags)
   1787  1.1  christos {
   1788  1.1  christos 	struct vm_page *pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE);
   1789  1.1  christos 	if (pg == NULL)
   1790  1.1  christos 		return NULL;
   1791  1.1  christos 
   1792  1.1  christos 	return (void *)pmap_map_poolpage(VM_PAGE_TO_PHYS(pg));
   1793  1.1  christos }
   1794  1.1  christos 
   1795  1.1  christos /*
   1796  1.1  christos  * pmap_pv_page_free:
   1797  1.1  christos  *
   1798  1.1  christos  *	Free a pv_entry pool page.
   1799  1.1  christos  */
   1800  1.1  christos void
   1801  1.1  christos pmap_pv_page_free(struct pool *pp, void *v)
   1802  1.1  christos {
   1803  1.1  christos 	vaddr_t va = (vaddr_t)v;
   1804  1.1  christos 
   1805  1.1  christos 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
   1806  1.1  christos 	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   1807  1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1808  1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1809  1.1  christos 	pmap_md_vca_remove(pg, va);
   1810  1.1  christos 	pmap_page_clear_attributes(mdpg, VM_PAGEMD_POOLPAGE);
   1811  1.1  christos 	uvm_pagefree(pg);
   1812  1.1  christos }
   1813  1.1  christos 
   1814  1.1  christos #ifdef PMAP_PREFER
   1815  1.1  christos /*
   1816  1.1  christos  * Find first virtual address >= *vap that doesn't cause
   1817  1.1  christos  * a cache alias conflict.
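                         *
                         * Worked example (hypothetical numbers): with prefer_mask ==
                         * 0x3fff, foff == 0x5000 and *vap == 0x2000, d == 0x3000 and the
                         * bottom-up case rounds *vap up to 0x5000, which is congruent to
                         * foff modulo the alias size and therefore maps to the same
                         * cache color.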
   1818  1.1  christos  */
   1819  1.1  christos void
   1820  1.1  christos pmap_prefer(vaddr_t foff, vaddr_t *vap, vsize_t sz, int td)
   1821  1.1  christos {
   1822  1.1  christos 	vaddr_t	va;
   1823  1.1  christos 	vsize_t d;
   1824  1.1  christos 	vsize_t prefer_mask = ptoa(uvmexp.colormask);
   1825  1.1  christos 
   1826  1.1  christos 	PMAP_COUNT(prefer_requests);
   1827  1.1  christos 
   1828  1.1  christos 	prefer_mask |= pmap_md_cache_prefer_mask();
   1829  1.1  christos 
   1830  1.1  christos 	if (prefer_mask) {
   1831  1.1  christos 		va = *vap;
   1832  1.1  christos 
   1833  1.1  christos 		d = foff - va;
   1834  1.1  christos 		d &= prefer_mask;
   1835  1.1  christos 		if (d) {
   1836  1.1  christos 			if (td)
    1837  1.1  christos 				*vap = trunc_page(va - ((-d) & prefer_mask));
   1838  1.1  christos 			else
   1839  1.1  christos 				*vap = round_page(va + d);
   1840  1.1  christos 			PMAP_COUNT(prefer_adjustments);
   1841  1.1  christos 		}
   1842  1.1  christos 	}
   1843  1.1  christos }
   1844  1.1  christos #endif /* PMAP_PREFER */
   1845  1.1  christos 
   1846  1.1  christos #ifdef PMAP_MAP_POOLPAGE
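                        /*
                         * pmap_map_poolpage:
                         *
                         *	Map a physical page for pool use via the direct-mapped
                         *	segment, marking it as a pool page and registering the
                         *	mapping with the MD virtual-cache-alias tracking.
                         */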
   1847  1.1  christos vaddr_t
   1848  1.1  christos pmap_map_poolpage(paddr_t pa)
   1849  1.1  christos {
   1850  1.1  christos 
   1851  1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1852  1.1  christos 	KASSERT(pg);
   1853  1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1854  1.1  christos 	pmap_page_set_attributes(mdpg, VM_PAGEMD_POOLPAGE);
   1855  1.1  christos 
   1856  1.1  christos 	const vaddr_t va = pmap_md_map_poolpage(pa, NBPG);
   1857  1.1  christos 	pmap_md_vca_add(pg, va, NULL);
   1858  1.1  christos 	return va;
   1859  1.1  christos }
   1860  1.1  christos 
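                        /*
                         * pmap_unmap_poolpage:
                         *
                         *	Reverse of pmap_map_poolpage(): clear the pool-page
                         *	attribute and the alias tracking and return the physical
                         *	address backing the direct-mapped va.
                         */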
   1861  1.1  christos paddr_t
   1862  1.1  christos pmap_unmap_poolpage(vaddr_t va)
   1863  1.1  christos {
   1864  1.1  christos 
   1865  1.1  christos 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
   1866  1.1  christos 	paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
   1867  1.1  christos 
   1868  1.1  christos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
   1869  1.1  christos 	KASSERT(pg);
   1870  1.1  christos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
   1871  1.1  christos 	pmap_page_clear_attributes(mdpg, VM_PAGEMD_POOLPAGE);
   1872  1.1  christos 	pmap_md_unmap_poolpage(va, NBPG);
   1873  1.1  christos 	pmap_md_vca_remove(pg, va);
   1874  1.1  christos 
   1875  1.1  christos 	return pa;
   1876  1.1  christos }
   1877  1.1  christos #endif /* PMAP_MAP_POOLPAGE */
   1878