/*	$NetBSD: booke_pmap.c,v 1.18.6.2 2015/12/27 12:09:40 skrll Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define __PMAP_PRIVATE

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.18.6.2 2015/12/27 12:09:40 skrll Exp $");

#include <sys/param.h>
#include <sys/kcore.h>
#include <sys/buf.h>
#include <sys/mutex.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>

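/*
 * With MULTIPROCESSOR, a single global spin mutex (initialized at
 * IPL_HIGH in pmap_bootstrap) serializes pmap updates with the
 * software TLB miss handlers on other CPUs; see
 * pmap_md_tlb_miss_lock_enter()/_exit() below.
 */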
#if defined(MULTIPROCESSOR)
kmutex_t pmap_tlb_miss_lock;
#endif

/*
 * Initialize the kernel pmap.
 */
#ifdef MULTIPROCESSOR
#define	PMAP_SIZE	offsetof(struct pmap, pm_pai[PMAP_TLB_MAX])
#else
#define	PMAP_SIZE	sizeof(struct pmap)
#endif

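/*
 * A segment table must occupy exactly one page; the kernel's is
 * allocated statically so it exists before any allocator is running.
 */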
CTASSERT(sizeof(pmap_segtab_t) == NBPG);

pmap_segtab_t pmap_kernel_segtab;

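/*
 * Synchronize the instruction cache after a process writes to its own
 * text (e.g. ptrace(2) inserting a breakpoint): write back the modified
 * data cache lines and invalidate the corresponding icache lines, at
 * most one page worth at a time.
 */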
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
	struct pmap * const pmap = p->p_vmspace->vm_map.pmap;
	vsize_t off = va & PAGE_MASK;

	kpreempt_disable();
	for (const vaddr_t eva = va + len; va < eva; off = 0) {
		const vaddr_t segeva = min(va + len, va - off + PAGE_SIZE);
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		if (ptep == NULL) {
			va = segeva;
			continue;
		}
		pt_entry_t pt_entry = *ptep;
		if (!pte_valid_p(pt_entry) || !pte_exec_p(pt_entry)) {
			va = segeva;
			continue;
		}
		kpreempt_enable();
		/* sync only the affected part of the page */
		dcache_wb(pte_to_paddr(pt_entry) + off, segeva - va);
		icache_inv(pte_to_paddr(pt_entry) + off, segeva - va);
		kpreempt_disable();
		va = segeva;
	}
	kpreempt_enable();
}

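/*
 * Make the instruction cache coherent for a page that is about to
 * become executable.
 */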
void
pmap_md_page_syncicache(struct vm_page *pg, const kcpuset_t *onproc)
{
	/*
	 * If onproc is empty, we could do a
	 * pmap_page_protect(pg, VM_PROT_NONE) and remove all
	 * mappings of the page and clear its execness.  Then
	 * the next time the page is faulted, it will get its
	 * icache synched.  But this is easier. :)
	 */
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	dcache_wb_page(pa);
	icache_inv_page(pa);
}

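/*
 * Physical memory is direct mapped VA==PA; only the window from
 * VM_MIN_KERNEL_ADDRESS to VM_MAX_KERNEL_ADDRESS is dynamically
 * translated, so conversion in either direction is the identity.
 */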
vaddr_t
pmap_md_direct_map_paddr(paddr_t pa)
{
	return (vaddr_t) pa;
}

bool
pmap_md_direct_mapped_vaddr_p(vaddr_t va)
{
	return va < VM_MIN_KERNEL_ADDRESS || VM_MAX_KERNEL_ADDRESS <= va;
}

paddr_t
pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t va)
{
	return (paddr_t) va;
}

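/*
 * With PMAP_MINIMALTLB the direct map is not covered by wired TLB
 * entries, so it must be backed by real kernel PTEs that the TLB miss
 * handler can find through the kernel segment table.
 */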
#ifdef PMAP_MINIMALTLB
static pt_entry_t *
kvtopte(const pmap_segtab_t *stp, vaddr_t va)
{
	pt_entry_t * const ptep = stp->seg_tab[va >> SEGSHIFT];
	if (ptep == NULL)
		return NULL;
	return &ptep[(va & SEGOFSET) >> PAGE_SHIFT];
}

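/*
 * Fill the kernel PTEs for [sva, eva) with the given protection bits.
 * These addresses are direct mapped, so the physical page number is the
 * virtual address itself; a zero pt_entry invalidates the range.
 */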
vaddr_t
pmap_kvptefill(vaddr_t sva, vaddr_t eva, pt_entry_t pt_entry)
{
	const pmap_segtab_t * const stp = pmap_kernel()->pm_segtab;
	KASSERT(sva == trunc_page(sva));
	pt_entry_t *ptep = kvtopte(stp, sva);
	for (; sva < eva; sva += NBPG) {
		*ptep++ = pt_entry ? (sva | pt_entry) : 0;
	}
	return sva;
}
#endif

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	startkernel and endkernel are the first and first-unused addresses
 *	of the loaded kernel image; avail[] lists the physical RAM segments
 *	still available.  Returns the updated endkernel after the boot-time
 *	page tables have been carved out of avail[0].
 */
vaddr_t
pmap_bootstrap(vaddr_t startkernel, vaddr_t endkernel,
	phys_ram_seg_t *avail, size_t cnt)
{
	pmap_segtab_t * const stp = &pmap_kernel_segtab;

	/*
	 * Initialize the kernel segment table.
	 */
	pmap_kernel()->pm_segtab = stp;
	curcpu()->ci_pmap_kern_segtab = stp;

	KASSERT(endkernel == trunc_page(endkernel));

	/* init the lock */
	pmap_tlb_info_init(&pmap_tlb0_info);

#if defined(MULTIPROCESSOR)
	mutex_init(&pmap_tlb_miss_lock, MUTEX_SPIN, IPL_HIGH);
#endif

	/*
	 * Compute the number of pages kmem_arena will have.
	 */
	kmeminit_nkmempages();

	/*
	 * Figure out how many PTE's are necessary to map the kernel.
	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
	 */

	/* Get size of buffer cache and set an upper limit */
	buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
	vsize_t bufsz = buf_memcalc();
	buf_setvalimit(bufsz);

	vsize_t kv_nsegtabs = pmap_round_seg(VM_PHYS_SIZE
	    + (ubc_nwins << ubc_winshift)
	    + bufsz
	    + 16 * NCARGS
	    + pager_map_size
	    + maxproc * USPACE
	    + NBPG * nkmempages) >> SEGSHIFT;

	/*
	 * Initialize `FYI' variables.	Note we're relying on
	 * the fact that BSEARCH sorts the vm_physmem[] array
	 * for us.  Must do this before uvm_pageboot_alloc()
	 * can be called.
	 */
	pmap_limits.avail_start = vm_physmem[0].start << PGSHIFT;
	pmap_limits.avail_end = vm_physmem[vm_nphysseg - 1].end << PGSHIFT;
	const size_t max_nsegtabs =
	    (pmap_round_seg(VM_MAX_KERNEL_ADDRESS)
		- pmap_trunc_seg(VM_MIN_KERNEL_ADDRESS)) / NBSEG;
	if (kv_nsegtabs >= max_nsegtabs) {
		pmap_limits.virtual_end = VM_MAX_KERNEL_ADDRESS;
		kv_nsegtabs = max_nsegtabs;
	} else {
		pmap_limits.virtual_end = VM_MIN_KERNEL_ADDRESS
		    + kv_nsegtabs * NBSEG;
	}

	/*
	 * Now actually allocate the kernel PTE array (must be done
	 * after virtual_end is initialized).
	 */
	const vaddr_t kv_segtabs = avail[0].start;
	KASSERT(kv_segtabs == endkernel);
	KASSERT(avail[0].size >= NBPG * kv_nsegtabs);
	printf(" kv_nsegtabs=%#"PRIxVSIZE, kv_nsegtabs);
	printf(" kv_segtabs=%#"PRIxVADDR, kv_segtabs);
	avail[0].start += NBPG * kv_nsegtabs;
	avail[0].size -= NBPG * kv_nsegtabs;
	endkernel += NBPG * kv_nsegtabs;

	/*
	 * Initialize the kernel's two-level page table.  This wastes only
	 * an extra page on the segment table, and lets user and kernel
	 * lookups share the same code path.
	 */
	pt_entry_t **ptp = &stp->seg_tab[VM_MIN_KERNEL_ADDRESS >> SEGSHIFT];
	pt_entry_t *ptep = (void *)kv_segtabs;
	memset(ptep, 0, NBPG * kv_nsegtabs);
	for (size_t i = 0; i < kv_nsegtabs; i++, ptep += NPTEPG) {
		*ptp++ = ptep;
	}

#ifdef PMAP_MINIMALTLB
	const vsize_t dm_nsegtabs = (physmem + NPTEPG - 1) / NPTEPG;
	const vaddr_t dm_segtabs = avail[0].start;
	printf(" dm_nsegtabs=%#"PRIxVSIZE, dm_nsegtabs);
	printf(" dm_segtabs=%#"PRIxVADDR, dm_segtabs);
	KASSERT(dm_segtabs == endkernel);
	KASSERT(avail[0].size >= NBPG * dm_nsegtabs);
	avail[0].start += NBPG * dm_nsegtabs;
	avail[0].size -= NBPG * dm_nsegtabs;
	endkernel += NBPG * dm_nsegtabs;

	ptp = stp->seg_tab;
	ptep = (void *)dm_segtabs;
	memset(ptep, 0, NBPG * dm_nsegtabs);
	for (size_t i = 0; i < dm_nsegtabs; i++, ptp++, ptep += NPTEPG) {
		*ptp = ptep;
	}

	/*
	 * Apply minimal protections to the kernel image: everything below
	 * the kernel is unmapped, text is read/execute, read-only data is
	 * read-only, and .data/.bss plus the page tables are read/write.
	 */
	extern uint32_t _fdata[], _etext[];
	vaddr_t va;

	/* Now make everything before the kernel inaccessible. */
	va = pmap_kvptefill(NBPG, startkernel, 0);

	/* Kernel text is readonly & executable */
	va = pmap_kvptefill(va, round_page((vaddr_t)_etext),
	    PTE_M | PTE_xR | PTE_xX);

	/* Kernel .rodata is read-only */
	va = pmap_kvptefill(va, trunc_page((vaddr_t)_fdata), PTE_M | PTE_xR);

	/* Kernel .data/.bss + page tables are read-write */
	va = pmap_kvptefill(va, round_page(endkernel), PTE_M | PTE_xR | PTE_xW);

	/* message buffer page table pages are read-write */
	(void) pmap_kvptefill(msgbuf_paddr, msgbuf_paddr+round_page(MSGBUFSIZE),
	    PTE_M | PTE_xR | PTE_xW);
#endif

	for (size_t i = 0; i < cnt; i++) {
		printf(" uvm_page_physload(%#lx,%#lx,%#lx,%#lx,%d)",
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
		uvm_page_physload(
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    atop(avail[i].start),
		    atop(avail[i].start + avail[i].size) - 1,
		    VM_FREELIST_DEFAULT);
	}

	pmap_pvlist_lock_init(curcpu()->ci_ci.dcache_line_size);

	/*
	 * Initialize the pools.
	 */
	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
	    &pmap_pv_page_allocator, IPL_NONE);

	tlb_set_asid(0);

	return endkernel;
}

struct vm_page *
pmap_md_alloc_poolpage(int flags)
{
	/*
	 * Any managed page works for us.
	 */
	return uvm_pagealloc(NULL, 0, NULL, flags);
}

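/*
 * Pool pages live in the direct map, so mapping one is just the VA==PA
 * cast; with PMAP_MINIMALTLB the backing PTEs must also be entered on
 * map and invalidated (with a TLB shootdown) on unmap.
 */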
vaddr_t
pmap_md_map_poolpage(paddr_t pa, vsize_t size)
{
	const vaddr_t sva = (vaddr_t) pa;
#ifdef PMAP_MINIMALTLB
	const vaddr_t eva = sva + size;
	pmap_kvptefill(sva, eva, PTE_M | PTE_xR | PTE_xW);
#endif
	return sva;
}

void
pmap_md_unmap_poolpage(vaddr_t va, vsize_t size)
{
#ifdef PMAP_MINIMALTLB
	struct pmap * const pm = pmap_kernel();
	const vaddr_t eva = va + size;
	pmap_kvptefill(va, eva, 0);
	for (; va < eva; va += NBPG) {
		pmap_tlb_invalidate_addr(pm, va);
	}
	pmap_update(pm);
#endif
}

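/*
 * Zero a physical page through its direct mapping.
 */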
void
pmap_zero_page(paddr_t pa)
{
	vaddr_t va = pmap_md_map_poolpage(pa, NBPG);
	dcache_zero_page(va);

	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(pa))));
	pmap_md_unmap_poolpage(va, NBPG);
}

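/*
 * Copy a page one cache line at a time: dcbt prefetches the next
 * source line, dcba establishes the destination line without fetching
 * its old contents, and lmw/stmw then move 32 bytes (r24-r31) per
 * step.  lmw/stmw include r31, which may be the frame pointer and so
 * cannot be named in the clobber list; instead it is saved and
 * restored around each transfer.
 */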
void
pmap_copy_page(paddr_t src, paddr_t dst)
{
	const size_t line_size = curcpu()->ci_ci.dcache_line_size;
	vaddr_t src_va = pmap_md_map_poolpage(src, NBPG);
	vaddr_t dst_va = pmap_md_map_poolpage(dst, NBPG);
	const vaddr_t end = src_va + PAGE_SIZE;

	while (src_va < end) {
		__asm __volatile(
			"dcbt	%2,%0"	"\n\t"	/* touch next src cacheline */
			"dcba	0,%1"	"\n\t"	/* don't fetch dst cacheline */
		    :: "b"(src_va), "b"(dst_va), "b"(line_size));
		for (u_int i = 0;
		     i < line_size;
		     src_va += 32, dst_va += 32, i += 32) {
			register_t tmp;
			__asm __volatile(
				"mr	%[tmp],31"	"\n\t"
				"lmw	24,0(%[src])"	"\n\t"
				"stmw	24,0(%[dst])"	"\n\t"
				"mr	31,%[tmp]"	"\n\t"
			    : [tmp] "=&r"(tmp)
			    : [src] "b"(src_va), [dst] "b"(dst_va)
			    : "r24", "r25", "r26", "r27",
			      "r28", "r29", "r30", "memory");
		}
	}
	/* src_va and dst_va have each advanced one full page by now */
	pmap_md_unmap_poolpage(src_va - PAGE_SIZE, NBPG);
	pmap_md_unmap_poolpage(dst_va - PAGE_SIZE, NBPG);

	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dst))));
}

void
pmap_md_init(void)
{

	/* nothing for now */
}

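/*
 * An address is I/O space if it is neither direct-mapped RAM (below
 * pmap_limits.avail_end) nor inside the kernel VM window.
 */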
bool
pmap_md_io_vaddr_p(vaddr_t va)
{
	return va >= pmap_limits.avail_end
	    && !(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS);
}

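/*
 * TLB-walk callback: verify that a TLB entry carrying this pmap's ASID
 * matches the real PTE.  The software-managed bits (PTE_UNMODIFIED,
 * PTE_UNSYNCED, PTE_WIRED) never make it into the TLB, and a page still
 * marked unmodified/unsynced is entered without the write or execute
 * permission that the corresponding software bit gates (each permission
 * bit is the software bit shifted left by one), so strip all of those
 * from the real PTE before comparing.
 */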
bool
pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
{
	pmap_t pm = ctx;
	struct pmap_asid_info * const pai = PMAP_PAI(pm, curcpu()->ci_tlb_info);

	if (asid != pai->pai_asid)
		return true;

	const pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
	KASSERT(ptep != NULL);
	pt_entry_t xpte = *ptep;
	xpte &= ~((xpte & (PTE_UNSYNCED|PTE_UNMODIFIED)) << 1);
	xpte ^= xpte & (PTE_UNSYNCED|PTE_UNMODIFIED|PTE_WIRED);

	KASSERTMSG(pte == xpte,
	    "pm=%p va=%#"PRIxVADDR" asid=%u: TLB pte (%#x) != real pte (%#x/%#x)",
	    pm, va, asid, pte, xpte, *ptep);

	return true;
}

#ifdef MULTIPROCESSOR
void
pmap_md_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
{
	/* nothing */
}

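/*
 * Enter/exit the global TLB miss lock, keeping pmap updates atomic
 * with respect to TLB miss handling on other CPUs.
 */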
void
pmap_md_tlb_miss_lock_enter(void)
{

	mutex_spin_enter(&pmap_tlb_miss_lock);
}

void
pmap_md_tlb_miss_lock_exit(void)
{

	mutex_spin_exit(&pmap_tlb_miss_lock);
}
#endif /* MULTIPROCESSOR */